From 05cb8d9269e2bee7de3af5934f3c87ca56207257 Mon Sep 17 00:00:00 2001 From: ByeonJungHun Date: Tue, 19 Dec 2023 13:36:16 +0900 Subject: [PATCH] =?UTF-8?q?Ansible=20Script=20=EC=B6=94=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ansible/00_old/agent_cluster_install.yaml | 9 + ansible/00_old/agent_datasaker.yml | 23 + ansible/00_old/api_cluster_install.yaml | 9 + ansible/00_old/authorized_keys.yml | 11 + ansible/00_old/bastion.yml | 95 + ansible/00_old/cmoa_install.yaml | 11 + ansible/00_old/datasaker.yml | 33 + ansible/00_old/dev_datasaker.yml | 50 + ansible/00_old/get-docker.sh | 645 +++ ansible/00_old/health_check.yml | 18 + ansible/00_old/install-centos-node.sh | 995 ++++ ansible/00_old/install-node-ubuntu.sh | 995 ++++ ansible/00_old/install-node.sh | 995 ++++ ansible/00_old/installer.sh | 28 + ansible/00_old/key_test.sh | 5 + ansible/00_old/local_datasaker.yml | 53 + ansible/00_old/node_role.yaml | 9 + ansible/00_old/roles.yaml | 27 + ansible/00_old/test.yml | 18 + ansible/00_old/vault_test.yaml | 10 + ansible/01_old/README.md | 1 + ansible/01_old/all_host | 20 + ansible/01_old/ansible.cfg | 10 + ansible/01_old/ansible_kubectl/Dockerfile | 26 + ansible/01_old/ansible_kubectl/kubeconfig | 23 + ansible/01_old/ansible_kubectl/testpod.yaml | 13 + ansible/01_old/infra-settings.yml | 19 + ansible/01_old/infra-test | 19 + ansible/01_old/inventory/.DS_Store | Bin 0 -> 6148 bytes ansible/01_old/inventory/agent-team | 2 + ansible/01_old/inventory/all_host | 14 + ansible/01_old/inventory/api-team | 2 + ansible/01_old/inventory/aws | 52 + ansible/01_old/inventory/cmoa | 13 + ansible/01_old/inventory/dsk-dev | 5 + ansible/01_old/inventory/etc | 7 + ansible/01_old/inventory/infra-team | 2 + ansible/01_old/inventory/passwd_inventory | 76 + ansible/01_old/inventory/teleport | 65 + ansible/01_old/inventory/teleport_test | 41 + ansible/01_old/inventory/test_node | 41 + ansible/01_old/inventory/zabbix_inventory | 76 + 
ansible/01_old/inventory2 | 3 + ansible/01_old/inventory_agent | 33 + ansible/01_old/inventory_api | 32 + ansible/01_old/inventory_bak | 22 + ansible/01_old/inventory_cent | 18 + ansible/01_old/inventory_dsk_dev | 35 + ansible/01_old/inventory_test | 75 + ansible/01_old/inventory_tmp_cluster | 29 + ansible/01_old/password_change.yml | 11 + ansible/01_old/password_change.yml_bak | 11 + ansible/01_old/restart.yml | 10 + ansible/01_old/roles/.DS_Store | Bin 0 -> 6148 bytes .../01_old/roles/agent_os_setting/README.md | 38 + .../roles/agent_os_setting/defaults/main.yml | 140 + .../agent_os_setting/files/get-docker.sh | 645 +++ .../files/ingress-nginx/.helmignore | 22 + .../files/ingress-nginx/CHANGELOG.md | 445 ++ .../files/ingress-nginx/Chart.yaml | 23 + .../files/ingress-nginx/OWNERS | 10 + .../files/ingress-nginx/README.md | 494 ++ .../files/ingress-nginx/README.md.gotmpl | 235 + .../controller-custom-ingressclass-flags.yaml | 7 + .../ci/daemonset-customconfig-values.yaml | 14 + .../ci/daemonset-customnodeport-values.yaml | 22 + .../ci/daemonset-extra-modules.yaml | 10 + .../ci/daemonset-headers-values.yaml | 14 + .../ci/daemonset-internal-lb-values.yaml | 14 + .../ci/daemonset-nodeport-values.yaml | 10 + .../ci/daemonset-podannotations-values.yaml | 17 + ...set-tcp-udp-configMapNamespace-values.yaml | 20 + ...emonset-tcp-udp-portNamePrefix-values.yaml | 18 + .../ci/daemonset-tcp-udp-values.yaml | 16 + .../ci/daemonset-tcp-values.yaml | 14 + .../ci/deamonset-default-values.yaml | 10 + .../ci/deamonset-metrics-values.yaml | 12 + .../ci/deamonset-psp-values.yaml | 13 + .../ci/deamonset-webhook-and-psp-values.yaml | 13 + .../ci/deamonset-webhook-values.yaml | 10 + ...eployment-autoscaling-behavior-values.yaml | 14 + .../ci/deployment-autoscaling-values.yaml | 11 + .../ci/deployment-customconfig-values.yaml | 12 + .../ci/deployment-customnodeport-values.yaml | 20 + .../ci/deployment-default-values.yaml | 8 + .../ci/deployment-extra-modules.yaml | 10 + 
.../ci/deployment-headers-values.yaml | 13 + .../ci/deployment-internal-lb-values.yaml | 13 + .../ci/deployment-metrics-values.yaml | 11 + .../ci/deployment-nodeport-values.yaml | 9 + .../ci/deployment-podannotations-values.yaml | 16 + .../ci/deployment-psp-values.yaml | 10 + ...ent-tcp-udp-configMapNamespace-values.yaml | 19 + ...loyment-tcp-udp-portNamePrefix-values.yaml | 17 + .../ci/deployment-tcp-udp-values.yaml | 15 + .../ci/deployment-tcp-values.yaml | 11 + .../ci/deployment-webhook-and-psp-values.yaml | 12 + .../deployment-webhook-extraEnvs-values.yaml | 12 + .../deployment-webhook-resources-values.yaml | 23 + .../ci/deployment-webhook-values.yaml | 9 + .../files/ingress-nginx/override-values.yaml | 10 + .../files/ingress-nginx/temp.yaml | 724 +++ .../files/ingress-nginx/temp2.yaml | 725 +++ .../files/ingress-nginx/templates/NOTES.txt | 80 + .../ingress-nginx/templates/_helpers.tpl | 185 + .../files/ingress-nginx/templates/_params.tpl | 62 + .../job-patch/clusterrole.yaml | 34 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 79 + .../job-patch/job-patchWebhook.yaml | 81 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 24 + .../job-patch/rolebinding.yaml | 24 + .../job-patch/serviceaccount.yaml | 16 + .../validating-webhook.yaml | 48 + .../ingress-nginx/templates/clusterrole.yaml | 94 + .../templates/clusterrolebinding.yaml | 19 + .../controller-configmap-addheaders.yaml | 14 + .../controller-configmap-proxyheaders.yaml | 19 + .../templates/controller-configmap-tcp.yaml | 17 + .../templates/controller-configmap-udp.yaml | 17 + .../templates/controller-configmap.yaml | 29 + .../templates/controller-daemonset.yaml | 223 + .../templates/controller-deployment.yaml | 228 + .../templates/controller-hpa.yaml | 52 + .../templates/controller-ingressclass.yaml | 21 + .../templates/controller-keda.yaml | 42 + .../controller-poddisruptionbudget.yaml | 19 + 
.../templates/controller-prometheusrules.yaml | 21 + .../templates/controller-psp.yaml | 94 + .../templates/controller-role.yaml | 113 + .../templates/controller-rolebinding.yaml | 21 + .../controller-service-internal.yaml | 79 + .../templates/controller-service-metrics.yaml | 45 + .../templates/controller-service-webhook.yaml | 40 + .../templates/controller-service.yaml | 101 + .../templates/controller-serviceaccount.yaml | 18 + .../templates/controller-servicemonitor.yaml | 48 + .../controller-wehbooks-networkpolicy.yaml | 19 + .../templates/default-backend-deployment.yaml | 118 + .../templates/default-backend-hpa.yaml | 33 + .../default-backend-poddisruptionbudget.yaml | 21 + .../templates/default-backend-psp.yaml | 38 + .../templates/default-backend-role.yaml | 22 + .../default-backend-rolebinding.yaml | 21 + .../templates/default-backend-service.yaml | 41 + .../default-backend-serviceaccount.yaml | 14 + .../templates/dh-param-secret.yaml | 10 + .../files/ingress-nginx/values.yaml | 944 ++++ .../roles/agent_os_setting/handlers/main.yml | 22 + .../roles/agent_os_setting/meta/main.yml | 52 + .../tasks/00-centos-os-main.yml | 81 + .../tasks/00-ubuntu-os-main.yml | 75 + .../tasks/01-centos-os-containerd.yml | 47 + .../tasks/01-centos-os-crio.yml | 50 + .../tasks/01-centos-os-docker.yml | 58 + .../tasks/01-ubuntu-os-containerd.yml | 78 + .../tasks/01-ubuntu-os-crio.yml | 84 + .../tasks/01-ubuntu-os-docker.yml | 78 + .../agent_os_setting/tasks/02-k8s-main.yml | 42 + .../agent_os_setting/tasks/03-k8s-master.yml | 45 + .../tasks/04-k8s-master-yaml.yml | 21 + .../tasks/04-k8s-master-yaml.yml_bak | 15 + .../agent_os_setting/tasks/05-k8s-node.yml | 6 + .../tasks/06-worker-directory.yml | 24 + .../roles/agent_os_setting/tasks/main.yml | 35 + .../agent_os_setting/templates/calico.yaml.j2 | 4779 +++++++++++++++++ .../templates/components.yaml.j2 | 197 + .../agent_os_setting/templates/config.toml.j2 | 5 + .../agent_os_setting/templates/daemon.json.j2 | 9 + 
.../roles/agent_os_setting/templates/hosts.j2 | 6 + .../templates/myregistry.conf.j2 | 3 + .../templates/yaml2toml_macro.j2 | 58 + .../roles/agent_os_setting/tests/inventory | 2 + .../roles/agent_os_setting/tests/test.yml | 5 + .../roles/agent_os_setting/vars/main.yml | 2 + .../roles/api_os_setting/defaults/main.yml | 140 + .../roles/api_os_setting/files/get-docker.sh | 645 +++ .../roles/api_os_setting/handlers/main.yml | 22 + .../01_old/roles/api_os_setting/meta/main.yml | 52 + .../tasks/00-centos-os-main.yml | 82 + .../tasks/00-ubuntu-os-main.yml | 71 + .../tasks/01-centos-os-containerd.yml | 47 + .../tasks/01-centos-os-crio.yml | 53 + .../tasks/01-centos-os-docker.yml | 58 + .../tasks/01-ubuntu-os-containerd.yml | 78 + .../tasks/01-ubuntu-os-crio.yml | 84 + .../tasks/01-ubuntu-os-docker.yml | 50 + .../api_os_setting/tasks/02-k8s-main.yml | 43 + .../api_os_setting/tasks/03-k8s-master.yml | 51 + .../tasks/04-k8s-master-yaml.yml | 21 + .../api_os_setting/tasks/05-k8s-node.yml | 6 + .../roles/api_os_setting/tasks/main.yml | 35 + .../api_os_setting/templates/calico.yaml.j2 | 4779 +++++++++++++++++ .../templates/components.yaml.j2 | 197 + .../api_os_setting/templates/config.toml.j2 | 5 + .../api_os_setting/templates/daemon.json.j2 | 9 + .../roles/api_os_setting/templates/hosts.j2 | 6 + .../templates/myregistry.conf.j2 | 3 + .../templates/yaml2toml_macro.j2 | 58 + .../roles/api_os_setting/tests/inventory | 2 + .../roles/api_os_setting/tests/test.yml | 5 + .../01_old/roles/api_os_setting/vars/main.yml | 2 + .../01_old/roles/bastion/defaults/main.yml | 48 + .../01_old/roles/bastion/files/login_banner | 20 + .../01_old/roles/bastion/handlers/main.yml | 6 + .../01_old/roles/bastion/tasks/admin_set.yml | 7 + ansible/01_old/roles/bastion/tasks/banner.yml | 29 + ansible/01_old/roles/bastion/tasks/crictl.yml | 19 + .../01_old/roles/bastion/tasks/login_defs.yml | 48 + ansible/01_old/roles/bastion/tasks/main.yml | 12 + ansible/01_old/roles/bastion/tasks/pam.yml | 50 + 
.../01_old/roles/bastion/tasks/profile.yml | 24 + .../roles/bastion/tasks/sshd_config.yml | 30 + .../01_old/roles/bastion/tasks/sudoers.yml | 105 + .../roles/bastion/templates/allow_users.j2 | 11 + .../roles/bastion/templates/common-auth.j2 | 27 + .../roles/bastion/templates/pwquality.conf.j2 | 50 + .../roles/bastion/templates/sudoers_users.j2 | 6 + .../roles/cmoa_demo_install/defaults/main.yml | 64 + .../files/00-default/sa_patch.sh | 8 + .../files/00-default/secret_dockerhub.yaml | 7 + .../files/00-default/secret_nexus.yaml | 8 + .../files/01-storage/00-storageclass.yaml | 6 + .../files/01-storage/01-persistentvolume.yaml | 92 + .../files/01-storage/cmoa_minio | 63 + .../files/01-storage/minio/.helmignore | 23 + .../files/01-storage/minio/Chart.yaml | 18 + .../files/01-storage/minio/README.md | 235 + .../01-storage/minio/templates/NOTES.txt | 43 + .../minio/templates/_helper_create_bucket.txt | 109 + .../minio/templates/_helper_create_policy.txt | 75 + .../minio/templates/_helper_create_user.txt | 88 + .../templates/_helper_custom_command.txt | 58 + .../minio/templates/_helper_policy.tpl | 18 + .../01-storage/minio/templates/_helpers.tpl | 218 + .../01-storage/minio/templates/configmap.yaml | 24 + .../minio/templates/console-ingress.yaml | 58 + .../minio/templates/console-service.yaml | 48 + .../minio/templates/deployment.yaml | 174 + .../minio/templates/gateway-deployment.yaml | 173 + .../01-storage/minio/templates/ingress.yaml | 58 + .../minio/templates/networkpolicy.yaml | 27 + .../minio/templates/poddisruptionbudget.yaml | 14 + .../post-install-create-bucket-job.yaml | 87 + .../post-install-create-policy-job.yaml | 87 + .../post-install-create-user-job.yaml | 97 + .../post-install-custom-command.yaml | 87 + .../files/01-storage/minio/templates/pvc.yaml | 35 + .../01-storage/minio/templates/secrets.yaml | 22 + .../templates/securitycontextconstraints.yaml | 45 + .../01-storage/minio/templates/service.yaml | 49 + .../minio/templates/serviceaccount.yaml | 7 + 
.../minio/templates/servicemonitor.yaml | 51 + .../minio/templates/statefulset.yaml | 217 + .../files/01-storage/minio/values.yaml | 461 ++ .../files/02-base/00-kafka-broker-config.yaml | 161 + .../files/02-base/01-coredns.yaml | 35 + .../files/02-base/base/.helmignore | 22 + .../files/02-base/base/Chart.yaml | 5 + .../02-base/base/charts/analysis/.helmignore | 22 + .../02-base/base/charts/analysis/Chart.yaml | 5 + .../imxc-metric-analyzer-master.yaml | 87 + .../imxc-metric-analyzer-worker.yaml | 38 + .../02-base/base/charts/analysis/values.yaml | 68 + .../02-base/base/charts/cortex/.helmignore | 29 + .../02-base/base/charts/cortex/Chart.lock | 24 + .../02-base/base/charts/cortex/Chart.yaml | 56 + .../02-base/base/charts/cortex/README.md | 754 +++ .../base/charts/cortex/templates/NOTES.txt | 9 + .../base/charts/cortex/templates/_helpers.tpl | 155 + .../alertmanager/alertmanager-dep.yaml | 30 + .../alertmanager/alertmanager-svc.yaml | 10 + .../charts/cortex/templates/clusterrole.yaml | 12 + .../cortex/templates/clusterrolebinding.yaml | 16 + .../compactor/_helpers-compactor.tpl | 23 + .../compactor-poddisruptionbudget.yaml | 14 + .../compactor/compactor-servicemonitor.yaml | 42 + .../compactor/compactor-statefulset.yaml | 141 + .../templates/compactor/compactor-svc.yaml | 25 + .../charts/cortex/templates/configmap.yaml | 12 + .../templates/configs/_helpers-configs.tpl | 23 + .../cortex/templates/configs/configs-dep.yaml | 124 + .../configs/configs-poddisruptionbudget.yaml | 14 + .../configs/configs-servicemonitor.yaml | 42 + .../cortex/templates/configs/configs-svc.yaml | 23 + .../charts/cortex/templates/cortex-pv.yaml | 68 + .../distributor/_helpers-distributor.tpl | 23 + .../distributor/distributor-dep.yaml | 121 + .../distributor/distributor-hpa.yaml | 39 + .../distributor-poddisruptionbudget.yaml | 14 + .../distributor-servicemonitor.yaml | 42 + .../distributor/distributor-svc-headless.yaml | 23 + .../distributor/distributor-svc.yaml | 21 + 
.../templates/ingester/_helpers-ingester.tpl | 23 + .../templates/ingester/ingester-dep.yaml | 130 + .../templates/ingester/ingester-hpa.yaml | 29 + .../ingester-poddisruptionbudget.yaml | 14 + .../ingester/ingester-servicemonitor.yaml | 42 + .../ingester/ingester-statefulset.yaml | 153 + .../ingester/ingester-svc-headless.yaml | 22 + .../templates/ingester/ingester-svc.yaml | 21 + .../cortex/templates/nginx/_helpers-nginx.tpl | 23 + .../cortex/templates/nginx/nginx-config.yaml | 140 + .../cortex/templates/nginx/nginx-dep.yaml | 111 + .../cortex/templates/nginx/nginx-hpa.yaml | 39 + .../cortex/templates/nginx/nginx-ingress.yaml | 40 + .../nginx/nginx-poddisruptionbudget.yaml | 14 + .../cortex/templates/nginx/nginx-svc.yaml | 23 + .../cortex/templates/node-exporter.yaml | 96 + .../templates/querier/_helpers-querier.tpl | 23 + .../cortex/templates/querier/querier-dep.yaml | 115 + .../cortex/templates/querier/querier-hpa.yaml | 39 + .../querier/querier-poddisruptionbudget.yaml | 14 + .../querier/querier-servicemonitor.yaml | 42 + .../cortex/templates/querier/querier-svc.yaml | 21 + .../_helpers-query-frontend.tpl | 23 + .../query-frontend/query-frontend-dep.yaml | 107 + .../query-frontend-servicemonitor.yaml | 42 + .../query-frontend-svc-headless.yaml | 23 + .../query-frontend/query-frontend-svc.yaml | 21 + .../query-poddisruptionbudget.yaml | 14 + .../cortex/templates/ruler/_helpers-ruler.tpl | 30 + .../templates/ruler/ruler-configmap.yaml | 14 + .../cortex/templates/ruler/ruler-dep.yaml | 191 + .../ruler/ruler-poddisruptionbudget.yaml | 14 + .../templates/ruler/ruler-servicemonitor.yaml | 42 + .../cortex/templates/ruler/ruler-svc.yaml | 23 + .../cortex/templates/runtime-configmap.yaml | 18 + .../cortex/templates/secret-postgresql.yaml | 11 + .../base/charts/cortex/templates/secret.yaml | 11 + .../cortex/templates/serviceaccount.yaml | 12 + .../store-gateway/_helpers-store-gateway.tpl | 23 + .../store-gateway-poddisruptionbudget.yaml | 14 + 
.../store-gateway-servicemonitor.yaml | 42 + .../store-gateway-statefulset.yaml | 142 + .../store-gateway-svc-headless.yaml | 24 + .../store-gateway/store-gateway-svc.yaml | 23 + .../templates/svc-memberlist-headless.yaml | 18 + .../table-manager/_helpers-table-manager.tpl | 23 + .../table-manager/table-manager-dep.yaml | 106 + .../table-manager-poddisruptionbudget.yaml | 14 + .../table-manager-servicemonitor.yaml | 42 + .../table-manager/table-manager-svc.yaml | 23 + .../02-base/base/charts/cortex/values.yaml | 1605 ++++++ .../base/charts/elasticsearch/.helmignore | 2 + .../base/charts/elasticsearch/Chart.yaml | 12 + .../templates/1.headless_service.yaml | 14 + .../elasticsearch/templates/2.service.yaml | 17 + .../elasticsearch/templates/3.configmap.yaml | 41 + .../charts/elasticsearch/templates/4.pv.yaml | 74 + .../charts/elasticsearch/templates/5.pvc.yaml | 53 + .../templates/6.statefulset.yaml | 146 + .../elasticsearch/templates/7.secrets.yaml | 10 + .../templates/needtocheck_storageclass.yaml | 8 + .../base/charts/elasticsearch/values.yaml | 68 + .../base/charts/kafka-manager/.helmignore | 22 + .../base/charts/kafka-manager/Chart.yaml | 5 + .../templates/0.kafka-manager-service.yaml | 14 + .../templates/1.kafka-manager.yaml | 33 + .../base/charts/kafka-manager/values.yaml | 68 + .../02-base/base/charts/kafka/.helmignore | 22 + .../base/charts/kafka/1.broker-config.yaml | 161 + .../02-base/base/charts/kafka/Chart.yaml | 5 + .../base/charts/kafka/templates/2.dns.yaml | 14 + .../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/templates/4.persistent-volume.yaml | 76 + .../base/charts/kafka/templates/5.kafka.yaml | 132 + .../charts/kafka/templates/6.outside.yaml | 89 + .../02-base/base/charts/kafka/values.yaml | 68 + .../02-base/base/charts/postgres/.helmignore | 22 + .../02-base/base/charts/postgres/Chart.yaml | 5 + .../templates/1.postgres-configmap.yaml | 11 + .../templates/2.postgres-storage.yaml | 38 + .../templates/3.postgres-service.yaml | 14 + 
.../templates/4.postgres-deployment.yaml | 45 + .../02-base/base/charts/postgres/values.yaml | 68 + .../02-base/base/charts/rabbitmq/.helmignore | 21 + .../02-base/base/charts/rabbitmq/Chart.lock | 6 + .../02-base/base/charts/rabbitmq/Chart.yaml | 26 + .../02-base/base/charts/rabbitmq/README.md | 566 ++ .../charts/rabbitmq/charts/common/.helmignore | 22 + .../charts/rabbitmq/charts/common/Chart.yaml | 23 + .../charts/rabbitmq/charts/common/README.md | 327 ++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 117 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 55 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 129 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/rabbitmq/charts/common/values.yaml | 5 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/tolerations-values.yaml | 4 + .../base/charts/rabbitmq/templates/NOTES.txt | 167 + .../charts/rabbitmq/templates/_helpers.tpl | 247 + .../rabbitmq/templates/configuration.yaml | 16 + .../charts/rabbitmq/templates/extra-list.yaml | 4 + .../charts/rabbitmq/templates/ingress.yaml | 57 + .../rabbitmq/templates/networkpolicy.yaml | 37 + .../base/charts/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../base/charts/rabbitmq/templates/pv.yaml | 22 + .../base/charts/rabbitmq/templates/pvc.yaml 
| 15 + .../base/charts/rabbitmq/templates/role.yaml | 18 + .../rabbitmq/templates/rolebinding.yaml | 18 + .../charts/rabbitmq/templates/secrets.yaml | 43 + .../rabbitmq/templates/serviceaccount.yaml | 14 + .../rabbitmq/templates/servicemonitor.yaml | 49 + .../rabbitmq/templates/statefulset.yaml | 382 ++ .../rabbitmq/templates/svc-headless.yaml | 40 + .../base/charts/rabbitmq/templates/svc.yaml | 95 + .../rabbitmq/templates/tls-secrets.yaml | 74 + .../base/charts/rabbitmq/values.schema.json | 100 + .../02-base/base/charts/rabbitmq/values.yaml | 1151 ++++ .../02-base/base/charts/redis/.helmignore | 21 + .../02-base/base/charts/redis/Chart.lock | 6 + .../02-base/base/charts/redis/Chart.yaml | 29 + .../files/02-base/base/charts/redis/README.md | 707 +++ .../charts/redis/charts/common/.helmignore | 22 + .../charts/redis/charts/common/Chart.yaml | 23 + .../base/charts/redis/charts/common/README.md | 316 ++ .../charts/common/templates/_affinities.tpl | 94 + .../charts/common/templates/_capabilities.tpl | 61 + .../redis/charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_ingress.tpl | 42 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 127 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 72 + .../templates/validations/_validations.tpl | 46 + .../charts/redis/charts/common/values.yaml | 3 + .../base/charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 +++ 
.../base/charts/redis/templates/NOTES.txt | 136 + .../base/charts/redis/templates/_helpers.tpl | 421 ++ .../redis/templates/configmap-scripts.yaml | 393 ++ .../charts/redis/templates/configmap.yaml | 53 + .../charts/redis/templates/headless-svc.yaml | 28 + .../redis/templates/health-configmap.yaml | 176 + .../redis/templates/metrics-prometheus.yaml | 39 + .../charts/redis/templates/metrics-svc.yaml | 34 + .../charts/redis/templates/networkpolicy.yaml | 74 + .../base/charts/redis/templates/pdb.yaml | 22 + .../redis/templates/prometheusrule.yaml | 25 + .../base/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 378 ++ .../redis/templates/redis-master-svc.yaml | 43 + .../templates/redis-node-statefulset.yaml | 494 ++ .../base/charts/redis/templates/redis-pv.yaml | 92 + .../charts/redis/templates/redis-role.yaml | 22 + .../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 15 + .../templates/redis-slave-statefulset.yaml | 384 ++ .../redis/templates/redis-slave-svc.yaml | 43 + .../templates/redis-with-sentinel-svc.yaml | 43 + .../base/charts/redis/templates/secret.yaml | 15 + .../base/charts/redis/values.schema.json | 168 + .../02-base/base/charts/redis/values.yaml | 932 ++++ .../02-base/base/charts/zookeeper/.helmignore | 22 + .../02-base/base/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + .../zookeeper/templates/2.service-client.yaml | 12 + .../templates/3.persistent-volume.yaml | 74 + .../zookeeper/templates/4.statefulset.yaml | 87 + .../charts/zookeeper/templates/5.pvc.yaml | 50 + .../02-base/base/charts/zookeeper/values.yaml | 68 + .../files/02-base/base/index.yaml | 3 + .../files/02-base/base/templates/role.yaml | 16 + .../files/02-base/base/values.yaml | 73 + .../03-ddl-dml/elasticsearch/es-ddl-put.sh | 3085 +++++++++++ ...ete_event_info_create_dest_source_index.sh | 220 + 
..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0-rel332/manual.txt | 31 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../03-ddl-dml/postgres/jaeger_menumeta.psql | 21 + .../03-ddl-dml/postgres/jspd_menumeta.psql | 22 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../patch/memu_meta/jaeger_menumeta.psql | 21 + .../patch/memu_meta/jspd_menumeta.psql | 22 + .../postgres/patch/postgres_patch_3.2.0.psql | 803 +++ .../postgres/patch/postgres_patch_3.3.0.psql | 919 ++++ .../postgres/patch/postgres_patch_3.3.2.psql | 459 ++ .../postgres/patch/postgres_patch_3.4.1.psql | 1379 +++++ .../postgres/patch/postgres_patch_3.4.2.psql | 8 + .../postgres/patch/postgres_patch_3.4.3.psql | 361 ++ .../postgres/patch/postgres_patch_3.4.6.psql | 360 ++ .../postgres/patch/postgres_patch_3.4.7.psql | 102 + 
.../postgres/patch/postgres_patch_3.4.8.psql | 387 ++ .../patch/postgres_patch_R30020210503.psql | 2844 ++++++++++ .../patch/postgres_patch_R30020210730.psql | 4 + .../postgres/postgres_insert_ddl.psql | 1667 ++++++ .../postgres/postgres_insert_dml.psql | 2380 ++++++++ .../files/04-keycloak/Chart.yaml | 23 + .../files/04-keycloak/OWNERS | 6 + .../files/04-keycloak/README.md | 765 +++ .../04-keycloak/charts/postgresql/.helmignore | 21 + .../04-keycloak/charts/postgresql/Chart.yaml | 24 + .../04-keycloak/charts/postgresql/README.md | 625 +++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 19 + .../charts/postgresql/charts/common/README.md | 228 + .../charts/common/templates/_capabilities.tpl | 22 + .../charts/common/templates/_images.tpl | 44 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/requirements.lock | 6 + .../charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 54 + .../charts/postgresql/templates/_helpers.tpl | 494 ++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 36 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + 
.../charts/postgresql/templates/pv.yaml | 27 + .../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 340 ++ .../postgresql/templates/statefulset.yaml | 510 ++ .../postgresql/templates/svc-headless.yaml | 18 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 591 ++ .../charts/postgresql/values.schema.json | 103 + .../04-keycloak/charts/postgresql/values.yaml | 604 +++ .../files/04-keycloak/ci/h2-values.yaml | 38 + .../04-keycloak/ci/postgres-ha-values.yaml | 73 + .../files/04-keycloak/requirements.lock | 6 + .../files/04-keycloak/requirements.yaml | 5 + .../files/04-keycloak/scripts/keycloak.cli | 13 + .../files/04-keycloak/templates/NOTES.txt | 61 + .../files/04-keycloak/templates/_helpers.tpl | 87 + .../templates/configmap-startup.yaml | 14 + .../files/04-keycloak/templates/hpa.yaml | 22 + .../files/04-keycloak/templates/ingress.yaml | 104 + .../04-keycloak/templates/networkpolicy.yaml | 46 + .../templates/poddisruptionbudget.yaml | 13 + .../04-keycloak/templates/prometheusrule.yaml | 24 + .../files/04-keycloak/templates/rbac.yaml | 25 + .../files/04-keycloak/templates/route.yaml | 34 + .../files/04-keycloak/templates/secrets.yaml | 29 + .../templates/service-headless.yaml | 18 + .../04-keycloak/templates/service-http.yaml | 59 + .../04-keycloak/templates/serviceaccount.yaml | 19 + .../04-keycloak/templates/servicemonitor.yaml | 39 + .../04-keycloak/templates/statefulset.yaml | 208 + .../templates/test/configmap-test.yaml | 50 + .../04-keycloak/templates/test/pod-test.yaml | 43 + .../files/04-keycloak/values.schema.json | 434 ++ .../files/04-keycloak/values.yaml | 552 ++ .../files/05-imxc/Chart.yaml | 5 + 
.../files/05-imxc/cmoa-manual.yaml | 36 + .../files/05-imxc/scripts/init-api-server.sh | 17 + .../files/05-imxc/scripts/init-auth-server.sh | 36 + .../files/05-imxc/scripts/init-noti-server.sh | 14 + .../files/05-imxc/scripts/init-resource.sh | 6 + .../files/05-imxc/scripts/init.json | 2148 ++++++++ .../files/05-imxc/templates/auth-server.yaml | 82 + .../05-imxc/templates/cloudmoa-datagate.yaml | 79 + .../templates/cloudmoa-metric-agent.yaml | 331 ++ .../templates/cloudmoa-metric-collector.yaml | 45 + .../templates/cmoa-kube-info-batch.yaml | 38 + .../templates/cmoa-kube-info-connector.yaml | 48 + .../templates/cmoa-kube-info-flat.yaml | 35 + .../files/05-imxc/templates/cmoa-manual.yaml | 36 + .../05-imxc/templates/eureka-server.yaml | 60 + .../05-imxc/templates/imxc-api-server.yaml | 245 + .../05-imxc/templates/imxc-collector.yaml | 79 + .../files/05-imxc/templates/noti-server.yaml | 121 + .../files/05-imxc/templates/streams-depl.yaml | 26 + .../05-imxc/templates/topology-agent.yaml | 107 + .../files/05-imxc/templates/zuul-server.yaml | 62 + .../files/05-imxc/values.yaml | 157 + .../06-imxc-ui/imxc-ui-jaeger/Chart.yaml | 5 + .../imxc-ui-jaeger/cmoa-manual.yaml | 36 + .../imxc-ui-jaeger/scripts/init-api-server.sh | 16 + .../scripts/init-auth-server.sh | 36 + .../scripts/init-noti-server.sh | 14 + .../imxc-ui-jaeger/scripts/init-resource.sh | 6 + .../imxc-ui-jaeger/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config-jaeger.yaml | 75 + .../templates/imxc-ui-server-jaeger.yaml | 63 + .../06-imxc-ui/imxc-ui-jaeger/values.yaml | 94 + .../files/06-imxc-ui/imxc-ui-jspd/Chart.yaml | 5 + .../imxc-ui-jspd/scripts/init-api-server.sh | 16 + .../imxc-ui-jspd/scripts/init-auth-server.sh | 36 + .../imxc-ui-jspd/scripts/init-noti-server.sh | 14 + .../imxc-ui-jspd/scripts/init-resource.sh | 6 + .../06-imxc-ui/imxc-ui-jspd/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config.yaml | 44 + .../templates/imxc-ui-server.yaml | 63 + 
.../files/06-imxc-ui/imxc-ui-jspd/values.yaml | 94 + .../roles/cmoa_demo_install/files/ip_change | 15 + .../roles/cmoa_demo_install/files/k8s_status | 86 + .../files/postgres_check_data | 6 + .../roles/cmoa_demo_install/files/rel_change | 15 + .../tasks/00-default-settings-master.yml | 30 + .../tasks/00-default-settings-node.yml | 27 + .../tasks/01-storage-install.yml | 45 + .../tasks/02-base-install.yml | 51 + .../cmoa_demo_install/tasks/03-ddl-dml.yml | 64 + .../tasks/04-keycloak-install.yml | 34 + .../tasks/05-imxc-install.yml | 16 + .../tasks/06-imxc-ui-install.yml | 112 + .../tasks/07-keycloak-setting.yml | 76 + .../cmoa_demo_install/tasks/08-finish.yml | 92 + .../cmoa_demo_install/tasks/helm-install.yml | 60 + .../roles/cmoa_demo_install/tasks/main.yml | 43 + .../cmoa_demo_install/templates/realm.json.j2 | 7 + .../roles/cmoa_demo_install/vars/main.yml | 7 + .../roles/cmoa_install/defaults/main.yml | 65 + .../cmoa_install/files/00-default/sa_patch.sh | 8 + .../files/00-default/secret_dockerhub.yaml | 7 + .../files/00-default/secret_nexus.yaml | 8 + .../files/01-storage/00-storageclass.yaml | 6 + .../files/01-storage/01-persistentvolume.yaml | 92 + .../cmoa_install/files/01-storage/cmoa_minio | 63 + .../files/01-storage/minio/.helmignore | 23 + .../files/01-storage/minio/Chart.yaml | 18 + .../files/01-storage/minio/README.md | 235 + .../01-storage/minio/templates/NOTES.txt | 43 + .../minio/templates/_helper_create_bucket.txt | 109 + .../minio/templates/_helper_create_policy.txt | 75 + .../minio/templates/_helper_create_user.txt | 88 + .../templates/_helper_custom_command.txt | 58 + .../minio/templates/_helper_policy.tpl | 18 + .../01-storage/minio/templates/_helpers.tpl | 218 + .../01-storage/minio/templates/configmap.yaml | 24 + .../minio/templates/console-ingress.yaml | 58 + .../minio/templates/console-service.yaml | 48 + .../minio/templates/deployment.yaml | 174 + .../minio/templates/gateway-deployment.yaml | 173 + .../01-storage/minio/templates/ingress.yaml 
| 58 + .../minio/templates/networkpolicy.yaml | 27 + .../minio/templates/poddisruptionbudget.yaml | 14 + .../post-install-create-bucket-job.yaml | 87 + .../post-install-create-policy-job.yaml | 87 + .../post-install-create-user-job.yaml | 97 + .../post-install-custom-command.yaml | 87 + .../files/01-storage/minio/templates/pvc.yaml | 35 + .../01-storage/minio/templates/secrets.yaml | 22 + .../templates/securitycontextconstraints.yaml | 45 + .../01-storage/minio/templates/service.yaml | 49 + .../minio/templates/serviceaccount.yaml | 7 + .../minio/templates/servicemonitor.yaml | 51 + .../minio/templates/statefulset.yaml | 217 + .../files/01-storage/minio/values.yaml | 461 ++ .../files/02-base/00-kafka-broker-config.yaml | 161 + .../files/02-base/01-coredns.yaml | 35 + .../files/02-base/base/.helmignore | 22 + .../files/02-base/base/Chart.yaml | 5 + .../02-base/base/charts/analysis/.helmignore | 22 + .../02-base/base/charts/analysis/Chart.yaml | 5 + .../imxc-metric-analyzer-master.yaml | 87 + .../imxc-metric-analyzer-worker.yaml | 38 + .../02-base/base/charts/analysis/values.yaml | 68 + .../02-base/base/charts/cortex/.helmignore | 29 + .../02-base/base/charts/cortex/Chart.lock | 24 + .../02-base/base/charts/cortex/Chart.yaml | 56 + .../02-base/base/charts/cortex/README.md | 754 +++ .../base/charts/cortex/templates/NOTES.txt | 9 + .../base/charts/cortex/templates/_helpers.tpl | 155 + .../alertmanager/alertmanager-dep.yaml | 30 + .../alertmanager/alertmanager-svc.yaml | 10 + .../charts/cortex/templates/clusterrole.yaml | 12 + .../cortex/templates/clusterrolebinding.yaml | 16 + .../compactor/_helpers-compactor.tpl | 23 + .../compactor-poddisruptionbudget.yaml | 14 + .../compactor/compactor-servicemonitor.yaml | 42 + .../compactor/compactor-statefulset.yaml | 141 + .../templates/compactor/compactor-svc.yaml | 25 + .../charts/cortex/templates/configmap.yaml | 12 + .../templates/configs/_helpers-configs.tpl | 23 + .../cortex/templates/configs/configs-dep.yaml | 124 + 
.../configs/configs-poddisruptionbudget.yaml | 14 + .../configs/configs-servicemonitor.yaml | 42 + .../cortex/templates/configs/configs-svc.yaml | 23 + .../charts/cortex/templates/cortex-pv.yaml | 68 + .../distributor/_helpers-distributor.tpl | 23 + .../distributor/distributor-dep.yaml | 121 + .../distributor/distributor-hpa.yaml | 39 + .../distributor-poddisruptionbudget.yaml | 14 + .../distributor-servicemonitor.yaml | 42 + .../distributor/distributor-svc-headless.yaml | 23 + .../distributor/distributor-svc.yaml | 21 + .../templates/ingester/_helpers-ingester.tpl | 23 + .../templates/ingester/ingester-dep.yaml | 130 + .../templates/ingester/ingester-hpa.yaml | 29 + .../ingester-poddisruptionbudget.yaml | 14 + .../ingester/ingester-servicemonitor.yaml | 42 + .../ingester/ingester-statefulset.yaml | 153 + .../ingester/ingester-svc-headless.yaml | 22 + .../templates/ingester/ingester-svc.yaml | 21 + .../cortex/templates/nginx/_helpers-nginx.tpl | 23 + .../cortex/templates/nginx/nginx-config.yaml | 140 + .../cortex/templates/nginx/nginx-dep.yaml | 111 + .../cortex/templates/nginx/nginx-hpa.yaml | 39 + .../cortex/templates/nginx/nginx-ingress.yaml | 40 + .../nginx/nginx-poddisruptionbudget.yaml | 14 + .../cortex/templates/nginx/nginx-svc.yaml | 23 + .../cortex/templates/node-exporter.yaml | 96 + .../templates/querier/_helpers-querier.tpl | 23 + .../cortex/templates/querier/querier-dep.yaml | 115 + .../cortex/templates/querier/querier-hpa.yaml | 39 + .../querier/querier-poddisruptionbudget.yaml | 14 + .../querier/querier-servicemonitor.yaml | 42 + .../cortex/templates/querier/querier-svc.yaml | 21 + .../_helpers-query-frontend.tpl | 23 + .../query-frontend/query-frontend-dep.yaml | 107 + .../query-frontend-servicemonitor.yaml | 42 + .../query-frontend-svc-headless.yaml | 23 + .../query-frontend/query-frontend-svc.yaml | 21 + .../query-poddisruptionbudget.yaml | 14 + .../cortex/templates/ruler/_helpers-ruler.tpl | 30 + .../templates/ruler/ruler-configmap.yaml | 14 + 
.../cortex/templates/ruler/ruler-dep.yaml | 191 + .../ruler/ruler-poddisruptionbudget.yaml | 14 + .../templates/ruler/ruler-servicemonitor.yaml | 42 + .../cortex/templates/ruler/ruler-svc.yaml | 23 + .../cortex/templates/runtime-configmap.yaml | 18 + .../cortex/templates/secret-postgresql.yaml | 11 + .../base/charts/cortex/templates/secret.yaml | 11 + .../cortex/templates/serviceaccount.yaml | 12 + .../store-gateway/_helpers-store-gateway.tpl | 23 + .../store-gateway-poddisruptionbudget.yaml | 14 + .../store-gateway-servicemonitor.yaml | 42 + .../store-gateway-statefulset.yaml | 142 + .../store-gateway-svc-headless.yaml | 24 + .../store-gateway/store-gateway-svc.yaml | 23 + .../templates/svc-memberlist-headless.yaml | 18 + .../table-manager/_helpers-table-manager.tpl | 23 + .../table-manager/table-manager-dep.yaml | 106 + .../table-manager-poddisruptionbudget.yaml | 14 + .../table-manager-servicemonitor.yaml | 42 + .../table-manager/table-manager-svc.yaml | 23 + .../02-base/base/charts/cortex/values.yaml | 1605 ++++++ .../base/charts/elasticsearch/.helmignore | 2 + .../base/charts/elasticsearch/Chart.yaml | 12 + .../templates/1.headless_service.yaml | 14 + .../elasticsearch/templates/2.service.yaml | 17 + .../elasticsearch/templates/3.configmap.yaml | 41 + .../charts/elasticsearch/templates/4.pv.yaml | 74 + .../charts/elasticsearch/templates/5.pvc.yaml | 53 + .../templates/6.statefulset.yaml | 146 + .../elasticsearch/templates/7.secrets.yaml | 10 + .../templates/needtocheck_storageclass.yaml | 8 + .../base/charts/elasticsearch/values.yaml | 68 + .../base/charts/kafka-manager/.helmignore | 22 + .../base/charts/kafka-manager/Chart.yaml | 5 + .../templates/0.kafka-manager-service.yaml | 14 + .../templates/1.kafka-manager.yaml | 33 + .../base/charts/kafka-manager/values.yaml | 68 + .../02-base/base/charts/kafka/.helmignore | 22 + .../base/charts/kafka/1.broker-config.yaml | 161 + .../02-base/base/charts/kafka/Chart.yaml | 5 + .../base/charts/kafka/templates/2.dns.yaml 
| 14 + .../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/templates/4.persistent-volume.yaml | 76 + .../base/charts/kafka/templates/5.kafka.yaml | 132 + .../charts/kafka/templates/6.outside.yaml | 89 + .../02-base/base/charts/kafka/values.yaml | 68 + .../02-base/base/charts/postgres/.helmignore | 22 + .../02-base/base/charts/postgres/Chart.yaml | 5 + .../templates/1.postgres-configmap.yaml | 11 + .../templates/2.postgres-storage.yaml | 38 + .../templates/3.postgres-service.yaml | 14 + .../templates/4.postgres-deployment.yaml | 45 + .../02-base/base/charts/postgres/values.yaml | 68 + .../02-base/base/charts/rabbitmq/.helmignore | 21 + .../02-base/base/charts/rabbitmq/Chart.lock | 6 + .../02-base/base/charts/rabbitmq/Chart.yaml | 26 + .../02-base/base/charts/rabbitmq/README.md | 566 ++ .../charts/rabbitmq/charts/common/.helmignore | 22 + .../charts/rabbitmq/charts/common/Chart.yaml | 23 + .../charts/rabbitmq/charts/common/README.md | 327 ++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 117 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 55 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 129 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/rabbitmq/charts/common/values.yaml | 5 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/tolerations-values.yaml | 4 
+ .../base/charts/rabbitmq/templates/NOTES.txt | 167 + .../charts/rabbitmq/templates/_helpers.tpl | 247 + .../rabbitmq/templates/configuration.yaml | 16 + .../charts/rabbitmq/templates/extra-list.yaml | 4 + .../charts/rabbitmq/templates/ingress.yaml | 57 + .../rabbitmq/templates/networkpolicy.yaml | 37 + .../base/charts/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../base/charts/rabbitmq/templates/pv.yaml | 22 + .../base/charts/rabbitmq/templates/pvc.yaml | 15 + .../base/charts/rabbitmq/templates/role.yaml | 18 + .../rabbitmq/templates/rolebinding.yaml | 18 + .../charts/rabbitmq/templates/secrets.yaml | 43 + .../rabbitmq/templates/serviceaccount.yaml | 14 + .../rabbitmq/templates/servicemonitor.yaml | 49 + .../rabbitmq/templates/statefulset.yaml | 382 ++ .../rabbitmq/templates/svc-headless.yaml | 40 + .../base/charts/rabbitmq/templates/svc.yaml | 95 + .../rabbitmq/templates/tls-secrets.yaml | 74 + .../base/charts/rabbitmq/values.schema.json | 100 + .../02-base/base/charts/rabbitmq/values.yaml | 1151 ++++ .../02-base/base/charts/redis/.helmignore | 21 + .../02-base/base/charts/redis/Chart.lock | 6 + .../02-base/base/charts/redis/Chart.yaml | 29 + .../files/02-base/base/charts/redis/README.md | 707 +++ .../charts/redis/charts/common/.helmignore | 22 + .../charts/redis/charts/common/Chart.yaml | 23 + .../base/charts/redis/charts/common/README.md | 316 ++ .../charts/common/templates/_affinities.tpl | 94 + .../charts/common/templates/_capabilities.tpl | 61 + .../redis/charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_ingress.tpl | 42 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 127 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + 
.../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 72 + .../templates/validations/_validations.tpl | 46 + .../charts/redis/charts/common/values.yaml | 3 + .../base/charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 +++ .../base/charts/redis/templates/NOTES.txt | 136 + .../base/charts/redis/templates/_helpers.tpl | 421 ++ .../redis/templates/configmap-scripts.yaml | 393 ++ .../charts/redis/templates/configmap.yaml | 53 + .../charts/redis/templates/headless-svc.yaml | 28 + .../redis/templates/health-configmap.yaml | 176 + .../redis/templates/metrics-prometheus.yaml | 39 + .../charts/redis/templates/metrics-svc.yaml | 34 + .../charts/redis/templates/networkpolicy.yaml | 74 + .../base/charts/redis/templates/pdb.yaml | 22 + .../redis/templates/prometheusrule.yaml | 25 + .../base/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 378 ++ .../redis/templates/redis-master-svc.yaml | 43 + .../templates/redis-node-statefulset.yaml | 494 ++ .../base/charts/redis/templates/redis-pv.yaml | 92 + .../charts/redis/templates/redis-role.yaml | 22 + .../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 15 + .../templates/redis-slave-statefulset.yaml | 384 ++ .../redis/templates/redis-slave-svc.yaml | 43 + .../templates/redis-with-sentinel-svc.yaml | 43 + .../base/charts/redis/templates/secret.yaml | 15 + .../base/charts/redis/values.schema.json | 168 + .../02-base/base/charts/redis/values.yaml | 932 ++++ .../02-base/base/charts/zookeeper/.helmignore | 22 + .../02-base/base/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + 
.../zookeeper/templates/2.service-client.yaml | 12 + .../templates/3.persistent-volume.yaml | 74 + .../zookeeper/templates/4.statefulset.yaml | 87 + .../charts/zookeeper/templates/5.pvc.yaml | 50 + .../02-base/base/charts/zookeeper/values.yaml | 68 + .../files/02-base/base/index.yaml | 3 + .../files/02-base/base/templates/role.yaml | 16 + .../files/02-base/base/values.yaml | 73 + .../03-ddl-dml/elasticsearch/es-ddl-put.sh | 3085 +++++++++++ ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0-rel332/manual.txt | 31 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../03-ddl-dml/postgres/jaeger_menumeta.psql | 21 + .../03-ddl-dml/postgres/jspd_menumeta.psql | 22 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../patch/memu_meta/jaeger_menumeta.psql 
| 21 + .../patch/memu_meta/jspd_menumeta.psql | 22 + .../postgres/patch/postgres_patch_3.2.0.psql | 803 +++ .../postgres/patch/postgres_patch_3.3.0.psql | 919 ++++ .../postgres/patch/postgres_patch_3.3.2.psql | 459 ++ .../postgres/patch/postgres_patch_3.4.1.psql | 1379 +++++ .../postgres/patch/postgres_patch_3.4.2.psql | 8 + .../postgres/patch/postgres_patch_3.4.3.psql | 361 ++ .../postgres/patch/postgres_patch_3.4.6.psql | 360 ++ .../postgres/patch/postgres_patch_3.4.7.psql | 102 + .../postgres/patch/postgres_patch_3.4.8.psql | 387 ++ .../patch/postgres_patch_R30020210503.psql | 2844 ++++++++++ .../patch/postgres_patch_R30020210730.psql | 4 + .../postgres/postgres_insert_ddl.psql | 1667 ++++++ .../postgres/postgres_insert_dml.psql | 2380 ++++++++ .../cmoa_install/files/04-keycloak/Chart.yaml | 23 + .../cmoa_install/files/04-keycloak/OWNERS | 6 + .../cmoa_install/files/04-keycloak/README.md | 765 +++ .../04-keycloak/charts/postgresql/.helmignore | 21 + .../04-keycloak/charts/postgresql/Chart.yaml | 24 + .../04-keycloak/charts/postgresql/README.md | 625 +++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 19 + .../charts/postgresql/charts/common/README.md | 228 + .../charts/common/templates/_capabilities.tpl | 22 + .../charts/common/templates/_images.tpl | 44 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/requirements.lock | 6 + 
.../charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 54 + .../charts/postgresql/templates/_helpers.tpl | 494 ++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 36 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/pv.yaml | 27 + .../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 340 ++ .../postgresql/templates/statefulset.yaml | 510 ++ .../postgresql/templates/svc-headless.yaml | 18 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 591 ++ .../charts/postgresql/values.schema.json | 103 + .../04-keycloak/charts/postgresql/values.yaml | 604 +++ .../files/04-keycloak/ci/h2-values.yaml | 38 + .../04-keycloak/ci/postgres-ha-values.yaml | 73 + .../files/04-keycloak/requirements.lock | 6 + .../files/04-keycloak/requirements.yaml | 5 + .../files/04-keycloak/scripts/keycloak.cli | 13 + .../files/04-keycloak/templates/NOTES.txt | 61 + .../files/04-keycloak/templates/_helpers.tpl | 87 + .../templates/configmap-startup.yaml | 14 + .../files/04-keycloak/templates/hpa.yaml | 22 + .../files/04-keycloak/templates/ingress.yaml | 104 + .../04-keycloak/templates/networkpolicy.yaml | 46 + .../templates/poddisruptionbudget.yaml | 13 + .../04-keycloak/templates/prometheusrule.yaml | 24 + .../files/04-keycloak/templates/rbac.yaml | 25 + .../files/04-keycloak/templates/route.yaml | 34 + 
.../files/04-keycloak/templates/secrets.yaml | 29 + .../templates/service-headless.yaml | 18 + .../04-keycloak/templates/service-http.yaml | 59 + .../04-keycloak/templates/serviceaccount.yaml | 19 + .../04-keycloak/templates/servicemonitor.yaml | 39 + .../04-keycloak/templates/statefulset.yaml | 208 + .../templates/test/configmap-test.yaml | 50 + .../04-keycloak/templates/test/pod-test.yaml | 43 + .../files/04-keycloak/values.schema.json | 434 ++ .../files/04-keycloak/values.yaml | 552 ++ .../cmoa_install/files/05-imxc/Chart.yaml | 5 + .../files/05-imxc/cmoa-manual.yaml | 36 + .../files/05-imxc/scripts/init-api-server.sh | 17 + .../files/05-imxc/scripts/init-auth-server.sh | 36 + .../files/05-imxc/scripts/init-noti-server.sh | 14 + .../files/05-imxc/scripts/init-resource.sh | 6 + .../files/05-imxc/scripts/init.json | 2148 ++++++++ .../files/05-imxc/templates/auth-server.yaml | 82 + .../05-imxc/templates/cloudmoa-datagate.yaml | 79 + .../templates/cloudmoa-metric-agent.yaml | 331 ++ .../templates/cloudmoa-metric-collector.yaml | 45 + .../templates/cmoa-kube-info-batch.yaml | 38 + .../templates/cmoa-kube-info-connector.yaml | 48 + .../templates/cmoa-kube-info-flat.yaml | 35 + .../files/05-imxc/templates/cmoa-manual.yaml | 36 + .../05-imxc/templates/eureka-server.yaml | 60 + .../05-imxc/templates/imxc-api-server.yaml | 245 + .../05-imxc/templates/imxc-collector.yaml | 79 + .../files/05-imxc/templates/noti-server.yaml | 121 + .../files/05-imxc/templates/streams-depl.yaml | 26 + .../05-imxc/templates/topology-agent.yaml | 107 + .../files/05-imxc/templates/zuul-server.yaml | 62 + .../cmoa_install/files/05-imxc/values.yaml | 157 + .../06-imxc-ui/imxc-ui-jaeger/Chart.yaml | 5 + .../imxc-ui-jaeger/cmoa-manual.yaml | 36 + .../imxc-ui-jaeger/scripts/init-api-server.sh | 16 + .../scripts/init-auth-server.sh | 36 + .../scripts/init-noti-server.sh | 14 + .../imxc-ui-jaeger/scripts/init-resource.sh | 6 + .../imxc-ui-jaeger/scripts/init.json | 2148 ++++++++ 
.../templates/imxc-ui-config-jaeger.yaml | 75 + .../templates/imxc-ui-server-jaeger.yaml | 63 + .../06-imxc-ui/imxc-ui-jaeger/values.yaml | 94 + .../files/06-imxc-ui/imxc-ui-jspd/Chart.yaml | 5 + .../imxc-ui-jspd/scripts/init-api-server.sh | 16 + .../imxc-ui-jspd/scripts/init-auth-server.sh | 36 + .../imxc-ui-jspd/scripts/init-noti-server.sh | 14 + .../imxc-ui-jspd/scripts/init-resource.sh | 6 + .../06-imxc-ui/imxc-ui-jspd/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config.yaml | 44 + .../templates/imxc-ui-server.yaml | 63 + .../files/06-imxc-ui/imxc-ui-jspd/values.yaml | 94 + .../01_old/roles/cmoa_install/files/ip_change | 15 + .../roles/cmoa_install/files/k8s_status | 86 + .../cmoa_install/files/postgres_check_data | 6 + .../roles/cmoa_install/files/rel_change | 15 + .../tasks/00-default-settings-master.yml | 30 + .../tasks/00-default-settings-node.yml | 27 + .../cmoa_install/tasks/01-storage-install.yml | 45 + .../cmoa_install/tasks/02-base-install.yml | 51 + .../roles/cmoa_install/tasks/03-ddl-dml.yml | 59 + .../tasks/04-keycloak-install.yml | 34 + .../cmoa_install/tasks/05-imxc-install.yml | 16 + .../cmoa_install/tasks/06-imxc-ui-install.yml | 112 + .../tasks/07-keycloak-setting.yml | 90 + .../roles/cmoa_install/tasks/08-finish.yml | 17 + .../roles/cmoa_install/tasks/helm-install.yml | 60 + .../01_old/roles/cmoa_install/tasks/main.yml | 43 + .../cmoa_install/templates/realm.json.j2 | 7 + .../01_old/roles/cmoa_install/vars/main.yml | 7 + .../roles/cmoa_install_bak/defaults/main.yml | 64 + .../files/00-default/sa_patch.sh | 6 + .../files/00-default/secret_dockerhub.yaml | 7 + .../files/00-default/secret_nexus.yaml | 8 + .../files/01-storage/00-storageclass.yaml | 6 + .../files/01-storage/01-persistentvolume.yaml | 92 + .../files/01-storage/cmoa_minio | 63 + .../files/01-storage/minio/.helmignore | 23 + .../files/01-storage/minio/Chart.yaml | 18 + .../files/01-storage/minio/README.md | 235 + .../01-storage/minio/templates/NOTES.txt | 43 + 
.../minio/templates/_helper_create_bucket.txt | 109 + .../minio/templates/_helper_create_policy.txt | 75 + .../minio/templates/_helper_create_user.txt | 88 + .../templates/_helper_custom_command.txt | 58 + .../minio/templates/_helper_policy.tpl | 18 + .../01-storage/minio/templates/_helpers.tpl | 218 + .../01-storage/minio/templates/configmap.yaml | 24 + .../minio/templates/console-ingress.yaml | 58 + .../minio/templates/console-service.yaml | 48 + .../minio/templates/deployment.yaml | 174 + .../minio/templates/gateway-deployment.yaml | 173 + .../01-storage/minio/templates/ingress.yaml | 58 + .../minio/templates/networkpolicy.yaml | 27 + .../minio/templates/poddisruptionbudget.yaml | 14 + .../post-install-create-bucket-job.yaml | 87 + .../post-install-create-policy-job.yaml | 87 + .../post-install-create-user-job.yaml | 97 + .../post-install-custom-command.yaml | 87 + .../files/01-storage/minio/templates/pvc.yaml | 35 + .../01-storage/minio/templates/secrets.yaml | 22 + .../templates/securitycontextconstraints.yaml | 45 + .../01-storage/minio/templates/service.yaml | 49 + .../minio/templates/serviceaccount.yaml | 7 + .../minio/templates/servicemonitor.yaml | 51 + .../minio/templates/statefulset.yaml | 217 + .../files/01-storage/minio/values.yaml | 461 ++ .../files/02-base/00-kafka-broker-config.yaml | 161 + .../files/02-base/01-coredns.yaml | 35 + .../files/02-base/base/.helmignore | 22 + .../files/02-base/base/Chart.yaml | 5 + .../02-base/base/charts/analysis/.helmignore | 22 + .../02-base/base/charts/analysis/Chart.yaml | 5 + .../imxc-metric-analyzer-master.yaml | 87 + .../imxc-metric-analyzer-worker.yaml | 38 + .../02-base/base/charts/analysis/values.yaml | 68 + .../02-base/base/charts/cortex/.helmignore | 29 + .../02-base/base/charts/cortex/Chart.lock | 24 + .../02-base/base/charts/cortex/Chart.yaml | 56 + .../02-base/base/charts/cortex/README.md | 754 +++ .../base/charts/cortex/templates/NOTES.txt | 9 + .../base/charts/cortex/templates/_helpers.tpl | 155 + 
.../alertmanager/alertmanager-dep.yaml | 30 + .../alertmanager/alertmanager-svc.yaml | 10 + .../charts/cortex/templates/clusterrole.yaml | 12 + .../cortex/templates/clusterrolebinding.yaml | 16 + .../compactor/_helpers-compactor.tpl | 23 + .../compactor-poddisruptionbudget.yaml | 14 + .../compactor/compactor-servicemonitor.yaml | 42 + .../compactor/compactor-statefulset.yaml | 141 + .../templates/compactor/compactor-svc.yaml | 25 + .../charts/cortex/templates/configmap.yaml | 12 + .../templates/configs/_helpers-configs.tpl | 23 + .../cortex/templates/configs/configs-dep.yaml | 124 + .../configs/configs-poddisruptionbudget.yaml | 14 + .../configs/configs-servicemonitor.yaml | 42 + .../cortex/templates/configs/configs-svc.yaml | 23 + .../charts/cortex/templates/cortex-pv.yaml | 68 + .../distributor/_helpers-distributor.tpl | 23 + .../distributor/distributor-dep.yaml | 121 + .../distributor/distributor-hpa.yaml | 39 + .../distributor-poddisruptionbudget.yaml | 14 + .../distributor-servicemonitor.yaml | 42 + .../distributor/distributor-svc-headless.yaml | 23 + .../distributor/distributor-svc.yaml | 21 + .../templates/ingester/_helpers-ingester.tpl | 23 + .../templates/ingester/ingester-dep.yaml | 130 + .../templates/ingester/ingester-hpa.yaml | 29 + .../ingester-poddisruptionbudget.yaml | 14 + .../ingester/ingester-servicemonitor.yaml | 42 + .../ingester/ingester-statefulset.yaml | 153 + .../ingester/ingester-svc-headless.yaml | 22 + .../templates/ingester/ingester-svc.yaml | 21 + .../cortex/templates/nginx/_helpers-nginx.tpl | 23 + .../cortex/templates/nginx/nginx-config.yaml | 140 + .../cortex/templates/nginx/nginx-dep.yaml | 111 + .../cortex/templates/nginx/nginx-hpa.yaml | 39 + .../cortex/templates/nginx/nginx-ingress.yaml | 40 + .../nginx/nginx-poddisruptionbudget.yaml | 14 + .../cortex/templates/nginx/nginx-svc.yaml | 23 + .../cortex/templates/node-exporter.yaml | 96 + .../templates/querier/_helpers-querier.tpl | 23 + .../cortex/templates/querier/querier-dep.yaml 
| 115 + .../cortex/templates/querier/querier-hpa.yaml | 39 + .../querier/querier-poddisruptionbudget.yaml | 14 + .../querier/querier-servicemonitor.yaml | 42 + .../cortex/templates/querier/querier-svc.yaml | 21 + .../_helpers-query-frontend.tpl | 23 + .../query-frontend/query-frontend-dep.yaml | 107 + .../query-frontend-servicemonitor.yaml | 42 + .../query-frontend-svc-headless.yaml | 23 + .../query-frontend/query-frontend-svc.yaml | 21 + .../query-poddisruptionbudget.yaml | 14 + .../cortex/templates/ruler/_helpers-ruler.tpl | 30 + .../templates/ruler/ruler-configmap.yaml | 14 + .../cortex/templates/ruler/ruler-dep.yaml | 191 + .../ruler/ruler-poddisruptionbudget.yaml | 14 + .../templates/ruler/ruler-servicemonitor.yaml | 42 + .../cortex/templates/ruler/ruler-svc.yaml | 23 + .../cortex/templates/runtime-configmap.yaml | 18 + .../cortex/templates/secret-postgresql.yaml | 11 + .../base/charts/cortex/templates/secret.yaml | 11 + .../cortex/templates/serviceaccount.yaml | 12 + .../store-gateway/_helpers-store-gateway.tpl | 23 + .../store-gateway-poddisruptionbudget.yaml | 14 + .../store-gateway-servicemonitor.yaml | 42 + .../store-gateway-statefulset.yaml | 142 + .../store-gateway-svc-headless.yaml | 24 + .../store-gateway/store-gateway-svc.yaml | 23 + .../templates/svc-memberlist-headless.yaml | 18 + .../table-manager/_helpers-table-manager.tpl | 23 + .../table-manager/table-manager-dep.yaml | 106 + .../table-manager-poddisruptionbudget.yaml | 14 + .../table-manager-servicemonitor.yaml | 42 + .../table-manager/table-manager-svc.yaml | 23 + .../02-base/base/charts/cortex/values.yaml | 1605 ++++++ .../base/charts/elasticsearch/.helmignore | 2 + .../base/charts/elasticsearch/Chart.yaml | 12 + .../templates/1.headless_service.yaml | 14 + .../elasticsearch/templates/2.service.yaml | 17 + .../elasticsearch/templates/3.configmap.yaml | 41 + .../charts/elasticsearch/templates/4.pv.yaml | 74 + .../charts/elasticsearch/templates/5.pvc.yaml | 53 + 
.../templates/6.statefulset.yaml | 146 + .../elasticsearch/templates/7.secrets.yaml | 10 + .../templates/needtocheck_storageclass.yaml | 8 + .../base/charts/elasticsearch/values.yaml | 68 + .../base/charts/kafka-manager/.helmignore | 22 + .../base/charts/kafka-manager/Chart.yaml | 5 + .../templates/0.kafka-manager-service.yaml | 14 + .../templates/1.kafka-manager.yaml | 33 + .../base/charts/kafka-manager/values.yaml | 68 + .../02-base/base/charts/kafka/.helmignore | 22 + .../base/charts/kafka/1.broker-config.yaml | 161 + .../02-base/base/charts/kafka/Chart.yaml | 5 + .../base/charts/kafka/templates/2.dns.yaml | 14 + .../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/templates/4.persistent-volume.yaml | 76 + .../base/charts/kafka/templates/5.kafka.yaml | 132 + .../charts/kafka/templates/6.outside.yaml | 89 + .../02-base/base/charts/kafka/values.yaml | 68 + .../02-base/base/charts/postgres/.helmignore | 22 + .../02-base/base/charts/postgres/Chart.yaml | 5 + .../templates/1.postgres-configmap.yaml | 11 + .../templates/2.postgres-storage.yaml | 38 + .../templates/3.postgres-service.yaml | 14 + .../templates/4.postgres-deployment.yaml | 45 + .../02-base/base/charts/postgres/values.yaml | 68 + .../02-base/base/charts/rabbitmq/.helmignore | 21 + .../02-base/base/charts/rabbitmq/Chart.lock | 6 + .../02-base/base/charts/rabbitmq/Chart.yaml | 26 + .../02-base/base/charts/rabbitmq/README.md | 566 ++ .../charts/rabbitmq/charts/common/.helmignore | 22 + .../charts/rabbitmq/charts/common/Chart.yaml | 23 + .../charts/rabbitmq/charts/common/README.md | 327 ++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 117 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 55 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 129 + 
.../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/rabbitmq/charts/common/values.yaml | 5 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/tolerations-values.yaml | 4 + .../base/charts/rabbitmq/templates/NOTES.txt | 167 + .../charts/rabbitmq/templates/_helpers.tpl | 247 + .../rabbitmq/templates/configuration.yaml | 16 + .../charts/rabbitmq/templates/extra-list.yaml | 4 + .../charts/rabbitmq/templates/ingress.yaml | 57 + .../rabbitmq/templates/networkpolicy.yaml | 37 + .../base/charts/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../base/charts/rabbitmq/templates/pv.yaml | 22 + .../base/charts/rabbitmq/templates/pvc.yaml | 15 + .../base/charts/rabbitmq/templates/role.yaml | 18 + .../rabbitmq/templates/rolebinding.yaml | 18 + .../charts/rabbitmq/templates/secrets.yaml | 43 + .../rabbitmq/templates/serviceaccount.yaml | 14 + .../rabbitmq/templates/servicemonitor.yaml | 49 + .../rabbitmq/templates/statefulset.yaml | 382 ++ .../rabbitmq/templates/svc-headless.yaml | 40 + .../base/charts/rabbitmq/templates/svc.yaml | 95 + .../rabbitmq/templates/tls-secrets.yaml | 74 + .../base/charts/rabbitmq/values.schema.json | 100 + .../02-base/base/charts/rabbitmq/values.yaml | 1151 ++++ .../02-base/base/charts/redis/.helmignore | 21 + .../02-base/base/charts/redis/Chart.lock | 6 + .../02-base/base/charts/redis/Chart.yaml | 29 + .../files/02-base/base/charts/redis/README.md | 707 +++ .../charts/redis/charts/common/.helmignore | 22 + .../charts/redis/charts/common/Chart.yaml | 
23 + .../base/charts/redis/charts/common/README.md | 316 ++ .../charts/common/templates/_affinities.tpl | 94 + .../charts/common/templates/_capabilities.tpl | 61 + .../redis/charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_ingress.tpl | 42 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 127 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 72 + .../templates/validations/_validations.tpl | 46 + .../charts/redis/charts/common/values.yaml | 3 + .../base/charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 +++ .../base/charts/redis/templates/NOTES.txt | 136 + .../base/charts/redis/templates/_helpers.tpl | 421 ++ .../redis/templates/configmap-scripts.yaml | 393 ++ .../charts/redis/templates/configmap.yaml | 53 + .../charts/redis/templates/headless-svc.yaml | 28 + .../redis/templates/health-configmap.yaml | 176 + .../redis/templates/metrics-prometheus.yaml | 39 + .../charts/redis/templates/metrics-svc.yaml | 34 + .../charts/redis/templates/networkpolicy.yaml | 74 + .../base/charts/redis/templates/pdb.yaml | 22 + .../redis/templates/prometheusrule.yaml | 25 + .../base/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 378 ++ .../redis/templates/redis-master-svc.yaml | 43 + .../templates/redis-node-statefulset.yaml | 494 ++ .../base/charts/redis/templates/redis-pv.yaml | 92 + .../charts/redis/templates/redis-role.yaml | 22 + 
.../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 15 + .../templates/redis-slave-statefulset.yaml | 384 ++ .../redis/templates/redis-slave-svc.yaml | 43 + .../templates/redis-with-sentinel-svc.yaml | 43 + .../base/charts/redis/templates/secret.yaml | 15 + .../base/charts/redis/values.schema.json | 168 + .../02-base/base/charts/redis/values.yaml | 932 ++++ .../02-base/base/charts/zookeeper/.helmignore | 22 + .../02-base/base/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + .../zookeeper/templates/2.service-client.yaml | 12 + .../templates/3.persistent-volume.yaml | 74 + .../zookeeper/templates/4.statefulset.yaml | 87 + .../charts/zookeeper/templates/5.pvc.yaml | 50 + .../02-base/base/charts/zookeeper/values.yaml | 68 + .../files/02-base/base/index.yaml | 3 + .../files/02-base/base/templates/role.yaml | 16 + .../files/02-base/base/values.yaml | 73 + .../03-ddl-dml/elasticsearch/es-ddl-put.sh | 3085 +++++++++++ ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0-rel332/manual.txt | 31 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 
+ .../03-ddl-dml/postgres/jaeger_menumeta.psql | 21 + .../03-ddl-dml/postgres/jspd_menumeta.psql | 22 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../patch/memu_meta/jaeger_menumeta.psql | 21 + .../patch/memu_meta/jspd_menumeta.psql | 22 + .../postgres/patch/postgres_patch_3.2.0.psql | 803 +++ .../postgres/patch/postgres_patch_3.3.0.psql | 919 ++++ .../postgres/patch/postgres_patch_3.3.2.psql | 459 ++ .../postgres/patch/postgres_patch_3.4.1.psql | 1379 +++++ .../postgres/patch/postgres_patch_3.4.2.psql | 8 + .../postgres/patch/postgres_patch_3.4.3.psql | 361 ++ .../postgres/patch/postgres_patch_3.4.6.psql | 360 ++ .../postgres/patch/postgres_patch_3.4.7.psql | 102 + .../postgres/patch/postgres_patch_3.4.8.psql | 387 ++ .../patch/postgres_patch_R30020210503.psql | 2844 ++++++++++ .../patch/postgres_patch_R30020210730.psql | 4 + .../postgres/postgres_insert_ddl.psql | 1667 ++++++ .../postgres/postgres_insert_dml.psql | 2380 ++++++++ .../files/04-keycloak/Chart.yaml | 23 + .../cmoa_install_bak/files/04-keycloak/OWNERS | 6 + .../files/04-keycloak/README.md | 765 +++ .../04-keycloak/charts/postgresql/.helmignore | 21 + .../04-keycloak/charts/postgresql/Chart.yaml | 24 + .../04-keycloak/charts/postgresql/README.md | 625 +++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 19 + .../charts/postgresql/charts/common/README.md | 228 + .../charts/common/templates/_capabilities.tpl | 22 + .../charts/common/templates/_images.tpl | 44 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl 
| 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/requirements.lock | 6 + .../charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 54 + .../charts/postgresql/templates/_helpers.tpl | 494 ++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 36 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/pv.yaml | 27 + .../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 340 ++ .../postgresql/templates/statefulset.yaml | 510 ++ .../postgresql/templates/svc-headless.yaml | 18 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 591 ++ .../charts/postgresql/values.schema.json | 103 + .../04-keycloak/charts/postgresql/values.yaml | 604 +++ .../files/04-keycloak/ci/h2-values.yaml | 38 + .../04-keycloak/ci/postgres-ha-values.yaml | 73 + .../files/04-keycloak/requirements.lock | 6 + 
.../files/04-keycloak/requirements.yaml | 5 + .../files/04-keycloak/scripts/keycloak.cli | 13 + .../files/04-keycloak/templates/NOTES.txt | 61 + .../files/04-keycloak/templates/_helpers.tpl | 87 + .../templates/configmap-startup.yaml | 14 + .../files/04-keycloak/templates/hpa.yaml | 22 + .../files/04-keycloak/templates/ingress.yaml | 104 + .../04-keycloak/templates/networkpolicy.yaml | 46 + .../templates/poddisruptionbudget.yaml | 13 + .../04-keycloak/templates/prometheusrule.yaml | 24 + .../files/04-keycloak/templates/rbac.yaml | 25 + .../files/04-keycloak/templates/route.yaml | 34 + .../files/04-keycloak/templates/secrets.yaml | 29 + .../templates/service-headless.yaml | 18 + .../04-keycloak/templates/service-http.yaml | 59 + .../04-keycloak/templates/serviceaccount.yaml | 19 + .../04-keycloak/templates/servicemonitor.yaml | 39 + .../04-keycloak/templates/statefulset.yaml | 208 + .../templates/test/configmap-test.yaml | 50 + .../04-keycloak/templates/test/pod-test.yaml | 43 + .../files/04-keycloak/values.schema.json | 434 ++ .../files/04-keycloak/values.yaml | 552 ++ .../cmoa_install_bak/files/05-imxc/Chart.yaml | 5 + .../files/05-imxc/cmoa-manual.yaml | 36 + .../files/05-imxc/scripts/init-api-server.sh | 17 + .../files/05-imxc/scripts/init-auth-server.sh | 36 + .../files/05-imxc/scripts/init-noti-server.sh | 14 + .../files/05-imxc/scripts/init-resource.sh | 6 + .../files/05-imxc/scripts/init.json | 2148 ++++++++ .../files/05-imxc/templates/auth-server.yaml | 82 + .../05-imxc/templates/cloudmoa-datagate.yaml | 79 + .../templates/cloudmoa-metric-agent.yaml | 331 ++ .../templates/cloudmoa-metric-collector.yaml | 45 + .../templates/cmoa-kube-info-batch.yaml | 38 + .../templates/cmoa-kube-info-connector.yaml | 48 + .../templates/cmoa-kube-info-flat.yaml | 35 + .../files/05-imxc/templates/cmoa-manual.yaml | 36 + .../05-imxc/templates/eureka-server.yaml | 60 + .../05-imxc/templates/imxc-api-server.yaml | 245 + .../05-imxc/templates/imxc-collector.yaml | 79 + 
.../files/05-imxc/templates/noti-server.yaml | 121 + .../files/05-imxc/templates/streams-depl.yaml | 26 + .../05-imxc/templates/topology-agent.yaml | 107 + .../files/05-imxc/templates/zuul-server.yaml | 62 + .../files/05-imxc/values.yaml | 157 + .../06-imxc-ui/imxc-ui-jaeger/Chart.yaml | 5 + .../imxc-ui-jaeger/cmoa-manual.yaml | 36 + .../imxc-ui-jaeger/scripts/init-api-server.sh | 16 + .../scripts/init-auth-server.sh | 36 + .../scripts/init-noti-server.sh | 14 + .../imxc-ui-jaeger/scripts/init-resource.sh | 6 + .../imxc-ui-jaeger/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config-jaeger.yaml | 75 + .../templates/imxc-ui-server-jaeger.yaml | 63 + .../06-imxc-ui/imxc-ui-jaeger/values.yaml | 94 + .../files/06-imxc-ui/imxc-ui-jspd/Chart.yaml | 5 + .../imxc-ui-jspd/scripts/init-api-server.sh | 16 + .../imxc-ui-jspd/scripts/init-auth-server.sh | 36 + .../imxc-ui-jspd/scripts/init-noti-server.sh | 14 + .../imxc-ui-jspd/scripts/init-resource.sh | 6 + .../06-imxc-ui/imxc-ui-jspd/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config.yaml | 44 + .../templates/imxc-ui-server.yaml | 63 + .../files/06-imxc-ui/imxc-ui-jspd/values.yaml | 94 + .../roles/cmoa_install_bak/files/ip_change | 15 + .../roles/cmoa_install_bak/files/k8s_status | 86 + .../files/postgres_check_data | 6 + .../roles/cmoa_install_bak/files/rel_change | 15 + .../tasks/00-default-settings-master.yml | 30 + .../tasks/00-default-settings-node.yml | 27 + .../tasks/01-storage-install.yml | 45 + .../tasks/02-base-install.yml | 51 + .../cmoa_install_bak/tasks/03-ddl-dml.yml | 64 + .../tasks/04-keycloak-install.yml | 34 + .../tasks/05-imxc-install.yml | 16 + .../tasks/06-imxc-ui-install.yml | 112 + .../tasks/07-keycloak-setting.yml | 76 + .../cmoa_install_bak/tasks/08-finish.yml | 92 + .../cmoa_install_bak/tasks/helm-install.yml | 60 + .../roles/cmoa_install_bak/tasks/main.yml | 43 + .../cmoa_install_bak/templates/realm.json.j2 | 7 + .../roles/cmoa_install_bak/vars/main.yml | 7 + 
.../01_old/roles/cmoa_os_setting/README.md | 38 + .../roles/cmoa_os_setting/defaults/main.yml | 140 + .../files/ingress-nginx/.helmignore | 22 + .../files/ingress-nginx/CHANGELOG.md | 445 ++ .../files/ingress-nginx/Chart.yaml | 23 + .../files/ingress-nginx/OWNERS | 10 + .../files/ingress-nginx/README.md | 494 ++ .../files/ingress-nginx/README.md.gotmpl | 235 + .../controller-custom-ingressclass-flags.yaml | 7 + .../ci/daemonset-customconfig-values.yaml | 14 + .../ci/daemonset-customnodeport-values.yaml | 22 + .../ci/daemonset-extra-modules.yaml | 10 + .../ci/daemonset-headers-values.yaml | 14 + .../ci/daemonset-internal-lb-values.yaml | 14 + .../ci/daemonset-nodeport-values.yaml | 10 + .../ci/daemonset-podannotations-values.yaml | 17 + ...set-tcp-udp-configMapNamespace-values.yaml | 20 + ...emonset-tcp-udp-portNamePrefix-values.yaml | 18 + .../ci/daemonset-tcp-udp-values.yaml | 16 + .../ci/daemonset-tcp-values.yaml | 14 + .../ci/deamonset-default-values.yaml | 10 + .../ci/deamonset-metrics-values.yaml | 12 + .../ci/deamonset-psp-values.yaml | 13 + .../ci/deamonset-webhook-and-psp-values.yaml | 13 + .../ci/deamonset-webhook-values.yaml | 10 + ...eployment-autoscaling-behavior-values.yaml | 14 + .../ci/deployment-autoscaling-values.yaml | 11 + .../ci/deployment-customconfig-values.yaml | 12 + .../ci/deployment-customnodeport-values.yaml | 20 + .../ci/deployment-default-values.yaml | 8 + .../ci/deployment-extra-modules.yaml | 10 + .../ci/deployment-headers-values.yaml | 13 + .../ci/deployment-internal-lb-values.yaml | 13 + .../ci/deployment-metrics-values.yaml | 11 + .../ci/deployment-nodeport-values.yaml | 9 + .../ci/deployment-podannotations-values.yaml | 16 + .../ci/deployment-psp-values.yaml | 10 + ...ent-tcp-udp-configMapNamespace-values.yaml | 19 + ...loyment-tcp-udp-portNamePrefix-values.yaml | 17 + .../ci/deployment-tcp-udp-values.yaml | 15 + .../ci/deployment-tcp-values.yaml | 11 + .../ci/deployment-webhook-and-psp-values.yaml | 12 + 
.../deployment-webhook-extraEnvs-values.yaml | 12 + .../deployment-webhook-resources-values.yaml | 23 + .../ci/deployment-webhook-values.yaml | 9 + .../files/ingress-nginx/override-values.yaml | 10 + .../files/ingress-nginx/temp.yaml | 724 +++ .../files/ingress-nginx/temp2.yaml | 725 +++ .../files/ingress-nginx/templates/NOTES.txt | 80 + .../ingress-nginx/templates/_helpers.tpl | 185 + .../files/ingress-nginx/templates/_params.tpl | 62 + .../job-patch/clusterrole.yaml | 34 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 79 + .../job-patch/job-patchWebhook.yaml | 81 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 24 + .../job-patch/rolebinding.yaml | 24 + .../job-patch/serviceaccount.yaml | 16 + .../validating-webhook.yaml | 48 + .../ingress-nginx/templates/clusterrole.yaml | 94 + .../templates/clusterrolebinding.yaml | 19 + .../controller-configmap-addheaders.yaml | 14 + .../controller-configmap-proxyheaders.yaml | 19 + .../templates/controller-configmap-tcp.yaml | 17 + .../templates/controller-configmap-udp.yaml | 17 + .../templates/controller-configmap.yaml | 29 + .../templates/controller-daemonset.yaml | 223 + .../templates/controller-deployment.yaml | 228 + .../templates/controller-hpa.yaml | 52 + .../templates/controller-ingressclass.yaml | 21 + .../templates/controller-keda.yaml | 42 + .../controller-poddisruptionbudget.yaml | 19 + .../templates/controller-prometheusrules.yaml | 21 + .../templates/controller-psp.yaml | 94 + .../templates/controller-role.yaml | 113 + .../templates/controller-rolebinding.yaml | 21 + .../controller-service-internal.yaml | 79 + .../templates/controller-service-metrics.yaml | 45 + .../templates/controller-service-webhook.yaml | 40 + .../templates/controller-service.yaml | 101 + .../templates/controller-serviceaccount.yaml | 18 + .../templates/controller-servicemonitor.yaml | 48 + .../controller-wehbooks-networkpolicy.yaml | 19 + 
.../templates/default-backend-deployment.yaml | 118 + .../templates/default-backend-hpa.yaml | 33 + .../default-backend-poddisruptionbudget.yaml | 21 + .../templates/default-backend-psp.yaml | 38 + .../templates/default-backend-role.yaml | 22 + .../default-backend-rolebinding.yaml | 21 + .../templates/default-backend-service.yaml | 41 + .../default-backend-serviceaccount.yaml | 14 + .../templates/dh-param-secret.yaml | 10 + .../files/ingress-nginx/values.yaml | 944 ++++ .../roles/cmoa_os_setting/handlers/main.yml | 10 + .../roles/cmoa_os_setting/meta/main.yml | 52 + .../tasks/00-centos-os-main.yml | 82 + .../tasks/00-ubuntu-os-main.yml | 71 + .../tasks/01-centos-os-runtime.yml | 45 + .../tasks/01-ubuntu-os-runtime.yml | 78 + .../cmoa_os_setting/tasks/02-k8s-main.yml | 45 + .../cmoa_os_setting/tasks/03-k8s-master.yml | 45 + .../tasks/04-k8s-master-yaml.yml | 15 + .../cmoa_os_setting/tasks/05-k8s-node.yml | 6 + .../tasks/06-worker-directory.yml | 43 + .../roles/cmoa_os_setting/tasks/main.yml | 19 + .../cmoa_os_setting/templates/config.toml.j2 | 5 + .../roles/cmoa_os_setting/templates/hosts.j2 | 6 + .../templates/yaml2toml_macro.j2 | 58 + .../roles/cmoa_os_setting/tests/inventory | 2 + .../roles/cmoa_os_setting/tests/test.yml | 5 + .../roles/cmoa_os_setting/vars/main.yml | 2 + .../01_old/roles/connect-settings/README.md | 38 + .../roles/connect-settings/defaults/main.yml | 15 + .../files/00_old/gen_password.py | 44 + .../files/00_old/vault_test.py | 11 + .../roles/connect-settings/files/custom_excel | 108 + .../connect-settings/files/decrypt_password | 21 + .../roles/connect-settings/files/gen_password | 45 + .../roles/connect-settings/files/vault_get | 17 + .../roles/connect-settings/files/vault_put | 21 + .../roles/connect-settings/handlers/main.yml | 16 + .../roles/connect-settings/meta/main.yml | 52 + .../tasks/00_host_setting.yml | 142 + .../tasks/01_get_password.yml | 36 + .../tasks/02_change_password.yml | 21 + .../roles/connect-settings/tasks/03_vault.yml | 21 
+ .../tasks/04_excel_export.yml | 19 + .../tasks/99_decrypt_password.yml | 27 + .../roles/connect-settings/tasks/main.yml | 15 + .../connect-settings/templates/allow_users.j2 | 22 + .../roles/connect-settings/tests/inventory | 2 + .../roles/connect-settings/tests/test.yml | 5 + .../roles/connect-settings/vars/main.yml | 2 + .../datadog.datadog/.circleci/config.yml | 299 ++ .../roles/datadog.datadog/.github/CODEOWNERS | 4 + .../01_old/roles/datadog.datadog/.gitignore | 14 + .../01_old/roles/datadog.datadog/CHANGELOG.md | 474 ++ .../roles/datadog.datadog/CONTRIBUTING.md | 65 + ansible/01_old/roles/datadog.datadog/LICENSE | 203 + .../01_old/roles/datadog.datadog/README.md | 642 +++ .../ci_test/downgrade_to_5.yaml | 25 + .../ci_test/install_agent_5.yaml | 25 + .../ci_test/install_agent_6.yaml | 32 + .../ci_test/install_agent_6_macos.yaml | 26 + .../ci_test/install_agent_7.yaml | 34 + .../ci_test/install_agent_7_macos.yaml | 28 + .../datadog.datadog/ci_test/inventory/ci.ini | 2 + .../ci_test/inventory/ci_macos.ini | 2 + .../roles/datadog.datadog/defaults/main.yml | 221 + .../datadog.datadog/handlers/main-macos.yml | 27 + .../datadog.datadog/handlers/main-win.yml | 11 + .../roles/datadog.datadog/handlers/main.yml | 31 + .../datadog.datadog/manual_tests/.gitignore | 1 + .../datadog.datadog/manual_tests/Vagrantfile | 19 + .../datadog.datadog/manual_tests/inventory | 2 + .../manual_tests/inventory_win | 9 + .../datadog.datadog/manual_tests/readme.md | 43 + .../manual_tests/test_5_default.yml | 6 + .../manual_tests/test_5_full.yml | 47 + .../manual_tests/test_6_default.yml | 6 + .../manual_tests/test_6_full.yml | 75 + .../manual_tests/test_7_default.yml | 4 + .../manual_tests/test_7_full.yml | 80 + .../datadog.datadog/meta/.galaxy_install_info | 2 + .../roles/datadog.datadog/meta/main.yml | 67 + .../tasks/_agent-linux-macos-shared.yml | 91 + .../datadog.datadog/tasks/_apt-key-import.yml | 90 + .../tasks/_remove_rpm_keys.yml | 5 + .../datadog.datadog/tasks/agent-linux.yml | 
161 + .../datadog.datadog/tasks/agent-macos.yml | 93 + .../roles/datadog.datadog/tasks/agent-win.yml | 103 + .../datadog.datadog/tasks/agent5-linux.yml | 77 + .../tasks/check-removed-config.yml | 9 + .../datadog.datadog/tasks/facts-ansible10.yml | 3 + .../datadog.datadog/tasks/facts-ansible9.yml | 3 + .../datadog.datadog/tasks/integration.yml | 86 + .../roles/datadog.datadog/tasks/main.yml | 71 + .../roles/datadog.datadog/tasks/os-check.yml | 5 + .../tasks/parse-version-macos.yml | 7 + .../tasks/parse-version-windows.yml | 18 + .../datadog.datadog/tasks/parse-version.yml | 104 + .../datadog.datadog/tasks/pkg-debian.yml | 127 + .../tasks/pkg-debian/install-latest.yml | 9 + .../tasks/pkg-debian/install-pinned.yml | 10 + .../roles/datadog.datadog/tasks/pkg-macos.yml | 86 + .../tasks/pkg-macos/macos_agent_latest.yml | 12 + .../tasks/pkg-macos/macos_agent_version.yml | 5 + .../datadog.datadog/tasks/pkg-redhat.yml | 169 + .../tasks/pkg-redhat/install-latest.yml | 18 + .../tasks/pkg-redhat/install-pinned.yml | 21 + .../roles/datadog.datadog/tasks/pkg-suse.yml | 107 + .../tasks/pkg-suse/install-latest.yml | 8 + .../tasks/pkg-suse/install-pinned.yml | 9 + .../tasks/pkg-windows-opts.yml | 92 + .../datadog.datadog/tasks/pkg-windows.yml | 87 + .../datadog.datadog/tasks/sanitize-checks.yml | 12 + .../tasks/set-parse-version.yml | 16 + .../tasks/win_agent_latest.yml | 12 + .../tasks/win_agent_version.yml | 10 + .../datadog.datadog/templates/checks.yaml.j2 | 1 + .../templates/com.datadoghq.agent.plist.j2 | 33 + .../datadog.datadog/templates/datadog.conf.j2 | 31 + .../datadog.datadog/templates/datadog.yaml.j2 | 19 + .../datadog.datadog/templates/install_info.j2 | 5 + .../templates/security-agent.yaml.j2 | 12 + .../templates/system-probe.yaml.j2 | 45 + .../datadog.datadog/templates/zypper.repo.j2 | 27 + ansible/01_old/roles/datasaker/README.md | 38 + .../01_old/roles/datasaker/defaults/main.yml | 34 + .../01_old/roles/datasaker/handlers/main.yml | 34 + 
.../roles/datasaker/tasks/check-agent.yml | 20 + .../roles/datasaker/tasks/dsk-common.yml | 10 + .../roles/datasaker/tasks/dsk-debian-pkg.yml | 60 + .../roles/datasaker/tasks/dsk-log-agent.yml | 15 + .../roles/datasaker/tasks/dsk-node-agent.yml | 15 + .../tasks/dsk-plan-postgres-agent.yml | 15 + .../roles/datasaker/tasks/dsk-trace-agent.yml | 15 + .../roles/datasaker/tasks/gather-facts.yml | 3 + .../roles/datasaker/tasks/main copy.yml | 12 + ansible/01_old/roles/datasaker/tasks/main.yml | 12 + .../datasaker/templates/global-config.yml.j2 | 22 + .../templates/log-agent-config.yml.j2 | 35 + .../templates/log-agent-config.yml.j2_bak | 37 + .../templates/node-agent-config.yml.j2 | 18 + .../plan-postgres-agent-config.yml.j2 | 18 + .../templates/trace-agent-config.yml.j2 | 5 + .../01_old/roles/dsk_bot.datasaker/README.md | 240 + .../roles/dsk_bot.datasaker/README_ko.md | 241 + .../roles/dsk_bot.datasaker/defaults/main.yml | 77 + .../files/libpq-13.5-1.el8.x86_64.rpm | Bin 0 -> 202384 bytes .../files/libpq-devel-13.5-1.el8.x86_64.rpm | Bin 0 -> 99352 bytes .../roles/dsk_bot.datasaker/handlers/main.yml | 34 + .../meta/.galaxy_install_info | 2 + .../roles/dsk_bot.datasaker/meta/main.yml | 28 + .../dsk_bot.datasaker/tasks/check-agent.yml | 49 + .../dsk_bot.datasaker/tasks/dsk-common.yml | 53 + .../tasks/dsk-debian-pkg.yml | 102 + .../tasks/dsk-docker-log-agent.yml | 53 + .../tasks/dsk-docker-node-agent.yml | 68 + .../tasks/dsk-docker-postgres-agent.yml | 62 + .../tasks/dsk-docker-trace-agent.yml | 37 + .../dsk_bot.datasaker/tasks/dsk-log-agent.yml | 65 + .../tasks/dsk-node-agent.yml | 20 + .../tasks/dsk-plan-postgres-agent.yml | 20 + .../tasks/dsk-postgres-agent.yml | 20 + .../tasks/dsk-redhat-pkg.yml | 61 + .../tasks/dsk-trace-agent.yml | 20 + .../dsk_bot.datasaker/tasks/gather-facts.yml | 3 + .../roles/dsk_bot.datasaker/tasks/main.yml | 47 + .../dsk_bot.datasaker/tasks/permissions.yml | 6 + .../tasks/remove-datasaker.yml | 135 + .../templates/docker-log-agent-config.yml.j2 
| 51 + .../docker-plan-postgres-agent-config.yml.j2 | 13 + .../docker-postgres-agent-config.yml.j2 | 20 + .../templates/fluent-bit-repo.yml.j2 | 7 + .../templates/global-config.yml.j2 | 22 + .../templates/log-agent-config.yml.j2 | 42 + .../templates/log-agent-config.yml.j2_bak | 37 + .../templates/node-agent-config.yml.j2 | 18 + .../plan-postgres-agent-config.yml.j2 | 17 + .../templates/postgres-agent-config.yml.j2 | 19 + .../templates/trace-agent-config.yml.j2 | 7 + .../01_old/roles/kubernetes_install/README.md | 38 + .../kubernetes_install/defaults/main.yml | 140 + .../files/ingress-nginx/.helmignore | 22 + .../files/ingress-nginx/CHANGELOG.md | 445 ++ .../files/ingress-nginx/Chart.yaml | 23 + .../files/ingress-nginx/OWNERS | 10 + .../files/ingress-nginx/README.md | 494 ++ .../files/ingress-nginx/README.md.gotmpl | 235 + .../controller-custom-ingressclass-flags.yaml | 7 + .../ci/daemonset-customconfig-values.yaml | 14 + .../ci/daemonset-customnodeport-values.yaml | 22 + .../ci/daemonset-extra-modules.yaml | 10 + .../ci/daemonset-headers-values.yaml | 14 + .../ci/daemonset-internal-lb-values.yaml | 14 + .../ci/daemonset-nodeport-values.yaml | 10 + .../ci/daemonset-podannotations-values.yaml | 17 + ...set-tcp-udp-configMapNamespace-values.yaml | 20 + ...emonset-tcp-udp-portNamePrefix-values.yaml | 18 + .../ci/daemonset-tcp-udp-values.yaml | 16 + .../ci/daemonset-tcp-values.yaml | 14 + .../ci/deamonset-default-values.yaml | 10 + .../ci/deamonset-metrics-values.yaml | 12 + .../ci/deamonset-psp-values.yaml | 13 + .../ci/deamonset-webhook-and-psp-values.yaml | 13 + .../ci/deamonset-webhook-values.yaml | 10 + ...eployment-autoscaling-behavior-values.yaml | 14 + .../ci/deployment-autoscaling-values.yaml | 11 + .../ci/deployment-customconfig-values.yaml | 12 + .../ci/deployment-customnodeport-values.yaml | 20 + .../ci/deployment-default-values.yaml | 8 + .../ci/deployment-extra-modules.yaml | 10 + .../ci/deployment-headers-values.yaml | 13 + 
.../ci/deployment-internal-lb-values.yaml | 13 + .../ci/deployment-metrics-values.yaml | 11 + .../ci/deployment-nodeport-values.yaml | 9 + .../ci/deployment-podannotations-values.yaml | 16 + .../ci/deployment-psp-values.yaml | 10 + ...ent-tcp-udp-configMapNamespace-values.yaml | 19 + ...loyment-tcp-udp-portNamePrefix-values.yaml | 17 + .../ci/deployment-tcp-udp-values.yaml | 15 + .../ci/deployment-tcp-values.yaml | 11 + .../ci/deployment-webhook-and-psp-values.yaml | 12 + .../deployment-webhook-extraEnvs-values.yaml | 12 + .../deployment-webhook-resources-values.yaml | 23 + .../ci/deployment-webhook-values.yaml | 9 + .../files/ingress-nginx/override-values.yaml | 10 + .../files/ingress-nginx/temp.yaml | 724 +++ .../files/ingress-nginx/temp2.yaml | 725 +++ .../files/ingress-nginx/templates/NOTES.txt | 80 + .../ingress-nginx/templates/_helpers.tpl | 185 + .../files/ingress-nginx/templates/_params.tpl | 62 + .../job-patch/clusterrole.yaml | 34 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 79 + .../job-patch/job-patchWebhook.yaml | 81 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 24 + .../job-patch/rolebinding.yaml | 24 + .../job-patch/serviceaccount.yaml | 16 + .../validating-webhook.yaml | 48 + .../ingress-nginx/templates/clusterrole.yaml | 94 + .../templates/clusterrolebinding.yaml | 19 + .../controller-configmap-addheaders.yaml | 14 + .../controller-configmap-proxyheaders.yaml | 19 + .../templates/controller-configmap-tcp.yaml | 17 + .../templates/controller-configmap-udp.yaml | 17 + .../templates/controller-configmap.yaml | 29 + .../templates/controller-daemonset.yaml | 223 + .../templates/controller-deployment.yaml | 228 + .../templates/controller-hpa.yaml | 52 + .../templates/controller-ingressclass.yaml | 21 + .../templates/controller-keda.yaml | 42 + .../controller-poddisruptionbudget.yaml | 19 + .../templates/controller-prometheusrules.yaml | 21 + 
.../templates/controller-psp.yaml | 94 + .../templates/controller-role.yaml | 113 + .../templates/controller-rolebinding.yaml | 21 + .../controller-service-internal.yaml | 79 + .../templates/controller-service-metrics.yaml | 45 + .../templates/controller-service-webhook.yaml | 40 + .../templates/controller-service.yaml | 101 + .../templates/controller-serviceaccount.yaml | 18 + .../templates/controller-servicemonitor.yaml | 48 + .../controller-wehbooks-networkpolicy.yaml | 19 + .../templates/default-backend-deployment.yaml | 118 + .../templates/default-backend-hpa.yaml | 33 + .../default-backend-poddisruptionbudget.yaml | 21 + .../templates/default-backend-psp.yaml | 38 + .../templates/default-backend-role.yaml | 22 + .../default-backend-rolebinding.yaml | 21 + .../templates/default-backend-service.yaml | 41 + .../default-backend-serviceaccount.yaml | 14 + .../templates/dh-param-secret.yaml | 10 + .../files/ingress-nginx/values.yaml | 944 ++++ .../roles/kubernetes_install/files/kubeconfig | 20 + .../kubernetes_install/handlers/main.yml | 10 + .../roles/kubernetes_install/meta/main.yml | 52 + .../tasks/helm-chart-nginx.yml | 13 + .../kubernetes_install/tasks/helm-install.yml | 60 + .../tasks/k8s-helm-chart.yml | 7 + .../kubernetes_install/tasks/k8s-main.yml | 68 + .../kubernetes_install/tasks/k8s-master.yml | 56 + .../kubernetes_install/tasks/k8s-node.yml | 6 + .../roles/kubernetes_install/tasks/main.yml | 10 + .../kubernetes_install/tasks/os-main.yml | 70 + .../kubernetes_install/tasks/os-runtime.yml | 45 + .../templates/config.toml.j2 | 5 + .../kubernetes_install/templates/hosts.j2 | 6 + .../templates/yaml2toml_macro.j2 | 58 + .../roles/kubernetes_install/tests/inventory | 2 + .../roles/kubernetes_install/tests/test.yml | 5 + .../roles/kubernetes_install/vars/main.yml | 2 + ansible/01_old/roles/node/tasks/main.yml | 3 + .../roles/node/templates/common-auth.j2 | 27 + .../roles/node/templates/pwquality.conf.j2 | 50 + ansible/01_old/roles/node/templates/sysctl.j2 | 
79 + .../01_old/roles/password_change/README.md | 38 + .../roles/password_change/defaults/main.yml | 15 + .../files/00_old/gen_password.py | 44 + .../files/00_old/vault_test.py | 11 + .../roles/password_change/files/custom_excel | 108 + .../password_change/files/decrypt_password | 21 + .../roles/password_change/files/gen_password | 45 + .../roles/password_change/files/vault_get | 17 + .../roles/password_change/files/vault_put | 21 + .../roles/password_change/handlers/main.yml | 16 + .../roles/password_change/meta/main.yml | 52 + .../password_change/tasks/01_get_password.yml | 41 + .../tasks/02_change_password.yml | 24 + .../roles/password_change/tasks/03_vault.yml | 21 + .../password_change/tasks/04_excel_export.yml | 54 + .../tasks/99_decrypt_password.yml | 27 + .../roles/password_change/tasks/main.yml | 12 + .../password_change/templates/allow_users.j2 | 22 + .../roles/password_change/tests/inventory | 2 + .../roles/password_change/tests/test.yml | 5 + .../roles/password_change/vars/main.yml | 2 + .../01_old/roles/security-settings/.DS_Store | Bin 0 -> 6148 bytes .../roles/security-settings/defaults/main.yml | 46 + .../security-settings/files/login_banner | 20 + .../roles/security-settings/handlers/main.yml | 6 + .../security-settings/tasks/admin_set.yml | 7 + .../roles/security-settings/tasks/banner.yml | 29 + .../roles/security-settings/tasks/crictl.yml | 19 + .../security-settings/tasks/login_defs.yml | 48 + .../roles/security-settings/tasks/main.yml | 24 + .../roles/security-settings/tasks/pam.yml | 50 + .../roles/security-settings/tasks/profile.yml | 24 + .../security-settings/tasks/sshd_config.yml | 30 + .../roles/security-settings/tasks/sudoers.yml | 94 + .../templates/allow_users.j2 | 11 + .../templates/common-auth.j2 | 27 + .../templates/pwquality.conf.j2 | 50 + .../templates/sudoers_users.j2 | 6 + ansible/01_old/roles/teleport/.DS_Store | Bin 0 -> 6148 bytes ansible/01_old/roles/teleport/README.md | 38 + .../01_old/roles/teleport/defaults/main.yml | 9 + 
.../01_old/roles/teleport/handlers/main.yml | 10 + ansible/01_old/roles/teleport/meta/main.yml | 52 + ansible/01_old/roles/teleport/tasks/main.yml | 33 + .../roles/teleport/tasks/teleport_install.yml | 25 + .../roles/teleport/tasks/teleport_remove.yml | 27 + .../roles/teleport/tasks/teleport_update.yml | 47 + .../teleport/templates/install-node.sh.j2 | 999 ++++ .../roles/teleport/templates/teleport.yaml.j2 | 35 + ansible/01_old/roles/teleport/vars/main.yml | 2 + ansible/01_old/roles/test/defaults/main.yml | 65 + .../roles/test/files/00-default/sa_patch.sh | 8 + .../files/00-default/secret_dockerhub.yaml | 7 + .../test/files/00-default/secret_nexus.yaml | 8 + .../files/01-storage/00-storageclass.yaml | 6 + .../files/01-storage/01-persistentvolume.yaml | 92 + .../roles/test/files/01-storage/cmoa_minio | 63 + .../test/files/01-storage/minio/.helmignore | 23 + .../test/files/01-storage/minio/Chart.yaml | 18 + .../test/files/01-storage/minio/README.md | 235 + .../01-storage/minio/templates/NOTES.txt | 43 + .../minio/templates/_helper_create_bucket.txt | 109 + .../minio/templates/_helper_create_policy.txt | 75 + .../minio/templates/_helper_create_user.txt | 88 + .../templates/_helper_custom_command.txt | 58 + .../minio/templates/_helper_policy.tpl | 18 + .../01-storage/minio/templates/_helpers.tpl | 218 + .../01-storage/minio/templates/configmap.yaml | 24 + .../minio/templates/console-ingress.yaml | 58 + .../minio/templates/console-service.yaml | 48 + .../minio/templates/deployment.yaml | 174 + .../minio/templates/gateway-deployment.yaml | 173 + .../01-storage/minio/templates/ingress.yaml | 58 + .../minio/templates/networkpolicy.yaml | 27 + .../minio/templates/poddisruptionbudget.yaml | 14 + .../post-install-create-bucket-job.yaml | 87 + .../post-install-create-policy-job.yaml | 87 + .../post-install-create-user-job.yaml | 97 + .../post-install-custom-command.yaml | 87 + .../files/01-storage/minio/templates/pvc.yaml | 35 + .../01-storage/minio/templates/secrets.yaml | 22 
+ .../templates/securitycontextconstraints.yaml | 45 + .../01-storage/minio/templates/service.yaml | 49 + .../minio/templates/serviceaccount.yaml | 7 + .../minio/templates/servicemonitor.yaml | 51 + .../minio/templates/statefulset.yaml | 217 + .../test/files/01-storage/minio/values.yaml | 461 ++ .../files/02-base/00-kafka-broker-config.yaml | 161 + .../roles/test/files/02-base/01-coredns.yaml | 35 + .../roles/test/files/02-base/base/.helmignore | 22 + .../roles/test/files/02-base/base/Chart.yaml | 5 + .../02-base/base/charts/analysis/.helmignore | 22 + .../02-base/base/charts/analysis/Chart.yaml | 5 + .../imxc-metric-analyzer-master.yaml | 87 + .../imxc-metric-analyzer-worker.yaml | 38 + .../02-base/base/charts/analysis/values.yaml | 68 + .../02-base/base/charts/cortex/.helmignore | 29 + .../02-base/base/charts/cortex/Chart.lock | 24 + .../02-base/base/charts/cortex/Chart.yaml | 56 + .../02-base/base/charts/cortex/README.md | 754 +++ .../base/charts/cortex/templates/NOTES.txt | 9 + .../base/charts/cortex/templates/_helpers.tpl | 155 + .../alertmanager/alertmanager-dep.yaml | 30 + .../alertmanager/alertmanager-svc.yaml | 10 + .../charts/cortex/templates/clusterrole.yaml | 12 + .../cortex/templates/clusterrolebinding.yaml | 16 + .../compactor/_helpers-compactor.tpl | 23 + .../compactor-poddisruptionbudget.yaml | 14 + .../compactor/compactor-servicemonitor.yaml | 42 + .../compactor/compactor-statefulset.yaml | 141 + .../templates/compactor/compactor-svc.yaml | 25 + .../charts/cortex/templates/configmap.yaml | 12 + .../templates/configs/_helpers-configs.tpl | 23 + .../cortex/templates/configs/configs-dep.yaml | 124 + .../configs/configs-poddisruptionbudget.yaml | 14 + .../configs/configs-servicemonitor.yaml | 42 + .../cortex/templates/configs/configs-svc.yaml | 23 + .../charts/cortex/templates/cortex-pv.yaml | 68 + .../distributor/_helpers-distributor.tpl | 23 + .../distributor/distributor-dep.yaml | 121 + .../distributor/distributor-hpa.yaml | 39 + 
.../distributor-poddisruptionbudget.yaml | 14 + .../distributor-servicemonitor.yaml | 42 + .../distributor/distributor-svc-headless.yaml | 23 + .../distributor/distributor-svc.yaml | 21 + .../templates/ingester/_helpers-ingester.tpl | 23 + .../templates/ingester/ingester-dep.yaml | 130 + .../templates/ingester/ingester-hpa.yaml | 29 + .../ingester-poddisruptionbudget.yaml | 14 + .../ingester/ingester-servicemonitor.yaml | 42 + .../ingester/ingester-statefulset.yaml | 153 + .../ingester/ingester-svc-headless.yaml | 22 + .../templates/ingester/ingester-svc.yaml | 21 + .../cortex/templates/nginx/_helpers-nginx.tpl | 23 + .../cortex/templates/nginx/nginx-config.yaml | 140 + .../cortex/templates/nginx/nginx-dep.yaml | 111 + .../cortex/templates/nginx/nginx-hpa.yaml | 39 + .../cortex/templates/nginx/nginx-ingress.yaml | 40 + .../nginx/nginx-poddisruptionbudget.yaml | 14 + .../cortex/templates/nginx/nginx-svc.yaml | 23 + .../cortex/templates/node-exporter.yaml | 96 + .../templates/querier/_helpers-querier.tpl | 23 + .../cortex/templates/querier/querier-dep.yaml | 115 + .../cortex/templates/querier/querier-hpa.yaml | 39 + .../querier/querier-poddisruptionbudget.yaml | 14 + .../querier/querier-servicemonitor.yaml | 42 + .../cortex/templates/querier/querier-svc.yaml | 21 + .../_helpers-query-frontend.tpl | 23 + .../query-frontend/query-frontend-dep.yaml | 107 + .../query-frontend-servicemonitor.yaml | 42 + .../query-frontend-svc-headless.yaml | 23 + .../query-frontend/query-frontend-svc.yaml | 21 + .../query-poddisruptionbudget.yaml | 14 + .../cortex/templates/ruler/_helpers-ruler.tpl | 30 + .../templates/ruler/ruler-configmap.yaml | 14 + .../cortex/templates/ruler/ruler-dep.yaml | 191 + .../ruler/ruler-poddisruptionbudget.yaml | 14 + .../templates/ruler/ruler-servicemonitor.yaml | 42 + .../cortex/templates/ruler/ruler-svc.yaml | 23 + .../cortex/templates/runtime-configmap.yaml | 18 + .../cortex/templates/secret-postgresql.yaml | 11 + 
.../base/charts/cortex/templates/secret.yaml | 11 + .../cortex/templates/serviceaccount.yaml | 12 + .../store-gateway/_helpers-store-gateway.tpl | 23 + .../store-gateway-poddisruptionbudget.yaml | 14 + .../store-gateway-servicemonitor.yaml | 42 + .../store-gateway-statefulset.yaml | 142 + .../store-gateway-svc-headless.yaml | 24 + .../store-gateway/store-gateway-svc.yaml | 23 + .../templates/svc-memberlist-headless.yaml | 18 + .../table-manager/_helpers-table-manager.tpl | 23 + .../table-manager/table-manager-dep.yaml | 106 + .../table-manager-poddisruptionbudget.yaml | 14 + .../table-manager-servicemonitor.yaml | 42 + .../table-manager/table-manager-svc.yaml | 23 + .../02-base/base/charts/cortex/values.yaml | 1605 ++++++ .../base/charts/elasticsearch/.helmignore | 2 + .../base/charts/elasticsearch/Chart.yaml | 12 + .../templates/1.headless_service.yaml | 14 + .../elasticsearch/templates/2.service.yaml | 17 + .../elasticsearch/templates/3.configmap.yaml | 41 + .../charts/elasticsearch/templates/4.pv.yaml | 74 + .../charts/elasticsearch/templates/5.pvc.yaml | 53 + .../templates/6.statefulset.yaml | 146 + .../elasticsearch/templates/7.secrets.yaml | 10 + .../templates/needtocheck_storageclass.yaml | 8 + .../base/charts/elasticsearch/values.yaml | 68 + .../base/charts/kafka-manager/.helmignore | 22 + .../base/charts/kafka-manager/Chart.yaml | 5 + .../templates/0.kafka-manager-service.yaml | 14 + .../templates/1.kafka-manager.yaml | 33 + .../base/charts/kafka-manager/values.yaml | 68 + .../02-base/base/charts/kafka/.helmignore | 22 + .../base/charts/kafka/1.broker-config.yaml | 161 + .../02-base/base/charts/kafka/Chart.yaml | 5 + .../base/charts/kafka/templates/2.dns.yaml | 14 + .../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/templates/4.persistent-volume.yaml | 76 + .../base/charts/kafka/templates/5.kafka.yaml | 132 + .../charts/kafka/templates/6.outside.yaml | 89 + .../02-base/base/charts/kafka/values.yaml | 68 + 
.../02-base/base/charts/postgres/.helmignore | 22 + .../02-base/base/charts/postgres/Chart.yaml | 5 + .../templates/1.postgres-configmap.yaml | 11 + .../templates/2.postgres-storage.yaml | 38 + .../templates/3.postgres-service.yaml | 14 + .../templates/4.postgres-deployment.yaml | 45 + .../02-base/base/charts/postgres/values.yaml | 68 + .../02-base/base/charts/rabbitmq/.helmignore | 21 + .../02-base/base/charts/rabbitmq/Chart.lock | 6 + .../02-base/base/charts/rabbitmq/Chart.yaml | 26 + .../02-base/base/charts/rabbitmq/README.md | 566 ++ .../charts/rabbitmq/charts/common/.helmignore | 22 + .../charts/rabbitmq/charts/common/Chart.yaml | 23 + .../charts/rabbitmq/charts/common/README.md | 327 ++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 117 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 55 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 129 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/rabbitmq/charts/common/values.yaml | 5 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/tolerations-values.yaml | 4 + .../base/charts/rabbitmq/templates/NOTES.txt | 167 + .../charts/rabbitmq/templates/_helpers.tpl | 247 + .../rabbitmq/templates/configuration.yaml | 16 + .../charts/rabbitmq/templates/extra-list.yaml | 4 + .../charts/rabbitmq/templates/ingress.yaml | 57 + 
.../rabbitmq/templates/networkpolicy.yaml | 37 + .../base/charts/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../base/charts/rabbitmq/templates/pv.yaml | 22 + .../base/charts/rabbitmq/templates/pvc.yaml | 15 + .../base/charts/rabbitmq/templates/role.yaml | 18 + .../rabbitmq/templates/rolebinding.yaml | 18 + .../charts/rabbitmq/templates/secrets.yaml | 43 + .../rabbitmq/templates/serviceaccount.yaml | 14 + .../rabbitmq/templates/servicemonitor.yaml | 49 + .../rabbitmq/templates/statefulset.yaml | 382 ++ .../rabbitmq/templates/svc-headless.yaml | 40 + .../base/charts/rabbitmq/templates/svc.yaml | 95 + .../rabbitmq/templates/tls-secrets.yaml | 74 + .../base/charts/rabbitmq/values.schema.json | 100 + .../02-base/base/charts/rabbitmq/values.yaml | 1151 ++++ .../02-base/base/charts/redis/.helmignore | 21 + .../02-base/base/charts/redis/Chart.lock | 6 + .../02-base/base/charts/redis/Chart.yaml | 29 + .../files/02-base/base/charts/redis/README.md | 707 +++ .../charts/redis/charts/common/.helmignore | 22 + .../charts/redis/charts/common/Chart.yaml | 23 + .../base/charts/redis/charts/common/README.md | 316 ++ .../charts/common/templates/_affinities.tpl | 94 + .../charts/common/templates/_capabilities.tpl | 61 + .../redis/charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_ingress.tpl | 42 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 127 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 72 + 
.../templates/validations/_validations.tpl | 46 + .../charts/redis/charts/common/values.yaml | 3 + .../base/charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 +++ .../base/charts/redis/templates/NOTES.txt | 136 + .../base/charts/redis/templates/_helpers.tpl | 421 ++ .../redis/templates/configmap-scripts.yaml | 393 ++ .../charts/redis/templates/configmap.yaml | 53 + .../charts/redis/templates/headless-svc.yaml | 28 + .../redis/templates/health-configmap.yaml | 176 + .../redis/templates/metrics-prometheus.yaml | 39 + .../charts/redis/templates/metrics-svc.yaml | 34 + .../charts/redis/templates/networkpolicy.yaml | 74 + .../base/charts/redis/templates/pdb.yaml | 22 + .../redis/templates/prometheusrule.yaml | 25 + .../base/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 378 ++ .../redis/templates/redis-master-svc.yaml | 43 + .../templates/redis-node-statefulset.yaml | 494 ++ .../base/charts/redis/templates/redis-pv.yaml | 92 + .../charts/redis/templates/redis-role.yaml | 22 + .../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 15 + .../templates/redis-slave-statefulset.yaml | 384 ++ .../redis/templates/redis-slave-svc.yaml | 43 + .../templates/redis-with-sentinel-svc.yaml | 43 + .../base/charts/redis/templates/secret.yaml | 15 + .../base/charts/redis/values.schema.json | 168 + .../02-base/base/charts/redis/values.yaml | 932 ++++ .../02-base/base/charts/zookeeper/.helmignore | 22 + .../02-base/base/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + .../zookeeper/templates/2.service-client.yaml | 12 + .../templates/3.persistent-volume.yaml | 74 + .../zookeeper/templates/4.statefulset.yaml | 87 + .../charts/zookeeper/templates/5.pvc.yaml | 50 + .../02-base/base/charts/zookeeper/values.yaml | 68 + 
.../roles/test/files/02-base/base/index.yaml | 3 + .../files/02-base/base/templates/role.yaml | 16 + .../roles/test/files/02-base/base/values.yaml | 73 + .../03-ddl-dml/elasticsearch/es-ddl-put.sh | 3085 +++++++++++ ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0-rel332/manual.txt | 31 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../03-ddl-dml/postgres/jaeger_menumeta.psql | 21 + .../03-ddl-dml/postgres/jspd_menumeta.psql | 22 + ...ete_event_info_create_dest_source_index.sh | 220 + ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../patch/memu_meta/jaeger_menumeta.psql | 21 + .../patch/memu_meta/jspd_menumeta.psql | 22 + .../postgres/patch/postgres_patch_3.2.0.psql | 803 +++ .../postgres/patch/postgres_patch_3.3.0.psql | 919 ++++ .../postgres/patch/postgres_patch_3.3.2.psql | 459 ++ 
.../postgres/patch/postgres_patch_3.4.1.psql | 1379 +++++ .../postgres/patch/postgres_patch_3.4.2.psql | 8 + .../postgres/patch/postgres_patch_3.4.3.psql | 361 ++ .../postgres/patch/postgres_patch_3.4.6.psql | 360 ++ .../postgres/patch/postgres_patch_3.4.7.psql | 102 + .../postgres/patch/postgres_patch_3.4.8.psql | 387 ++ .../patch/postgres_patch_R30020210503.psql | 2844 ++++++++++ .../patch/postgres_patch_R30020210730.psql | 4 + .../postgres/postgres_insert_ddl.psql | 1667 ++++++ .../postgres/postgres_insert_dml.psql | 2380 ++++++++ .../roles/test/files/04-keycloak/Chart.yaml | 23 + .../roles/test/files/04-keycloak/OWNERS | 6 + .../roles/test/files/04-keycloak/README.md | 765 +++ .../04-keycloak/charts/postgresql/.helmignore | 21 + .../04-keycloak/charts/postgresql/Chart.yaml | 24 + .../04-keycloak/charts/postgresql/README.md | 625 +++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 19 + .../charts/postgresql/charts/common/README.md | 228 + .../charts/common/templates/_capabilities.tpl | 22 + .../charts/common/templates/_images.tpl | 44 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/requirements.lock | 6 + .../charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 54 + .../charts/postgresql/templates/_helpers.tpl | 494 ++ .../postgresql/templates/configmap.yaml | 26 + 
.../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 36 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/pv.yaml | 27 + .../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 340 ++ .../postgresql/templates/statefulset.yaml | 510 ++ .../postgresql/templates/svc-headless.yaml | 18 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 591 ++ .../charts/postgresql/values.schema.json | 103 + .../04-keycloak/charts/postgresql/values.yaml | 604 +++ .../test/files/04-keycloak/ci/h2-values.yaml | 38 + .../04-keycloak/ci/postgres-ha-values.yaml | 73 + .../test/files/04-keycloak/requirements.lock | 6 + .../test/files/04-keycloak/requirements.yaml | 5 + .../files/04-keycloak/scripts/keycloak.cli | 13 + .../files/04-keycloak/templates/NOTES.txt | 61 + .../files/04-keycloak/templates/_helpers.tpl | 87 + .../templates/configmap-startup.yaml | 14 + .../test/files/04-keycloak/templates/hpa.yaml | 22 + .../files/04-keycloak/templates/ingress.yaml | 104 + .../04-keycloak/templates/networkpolicy.yaml | 46 + .../templates/poddisruptionbudget.yaml | 13 + .../04-keycloak/templates/prometheusrule.yaml | 24 + .../files/04-keycloak/templates/rbac.yaml | 25 + .../files/04-keycloak/templates/route.yaml | 34 + .../files/04-keycloak/templates/secrets.yaml | 29 + .../templates/service-headless.yaml | 18 + .../04-keycloak/templates/service-http.yaml | 59 + .../04-keycloak/templates/serviceaccount.yaml 
| 19 + .../04-keycloak/templates/servicemonitor.yaml | 39 + .../04-keycloak/templates/statefulset.yaml | 208 + .../templates/test/configmap-test.yaml | 50 + .../04-keycloak/templates/test/pod-test.yaml | 43 + .../test/files/04-keycloak/values.schema.json | 434 ++ .../roles/test/files/04-keycloak/values.yaml | 552 ++ .../roles/test/files/05-imxc/Chart.yaml | 5 + .../roles/test/files/05-imxc/cmoa-manual.yaml | 36 + .../files/05-imxc/scripts/init-api-server.sh | 17 + .../files/05-imxc/scripts/init-auth-server.sh | 36 + .../files/05-imxc/scripts/init-noti-server.sh | 14 + .../files/05-imxc/scripts/init-resource.sh | 6 + .../test/files/05-imxc/scripts/init.json | 2148 ++++++++ .../files/05-imxc/templates/auth-server.yaml | 82 + .../05-imxc/templates/cloudmoa-datagate.yaml | 79 + .../templates/cloudmoa-metric-agent.yaml | 331 ++ .../templates/cloudmoa-metric-collector.yaml | 45 + .../templates/cmoa-kube-info-batch.yaml | 38 + .../templates/cmoa-kube-info-connector.yaml | 48 + .../templates/cmoa-kube-info-flat.yaml | 35 + .../files/05-imxc/templates/cmoa-manual.yaml | 36 + .../05-imxc/templates/eureka-server.yaml | 60 + .../05-imxc/templates/imxc-api-server.yaml | 245 + .../05-imxc/templates/imxc-collector.yaml | 79 + .../files/05-imxc/templates/noti-server.yaml | 121 + .../files/05-imxc/templates/streams-depl.yaml | 26 + .../05-imxc/templates/topology-agent.yaml | 107 + .../files/05-imxc/templates/zuul-server.yaml | 62 + .../roles/test/files/05-imxc/values.yaml | 157 + .../06-imxc-ui/imxc-ui-jaeger/Chart.yaml | 5 + .../imxc-ui-jaeger/cmoa-manual.yaml | 36 + .../imxc-ui-jaeger/scripts/init-api-server.sh | 16 + .../scripts/init-auth-server.sh | 36 + .../scripts/init-noti-server.sh | 14 + .../imxc-ui-jaeger/scripts/init-resource.sh | 6 + .../imxc-ui-jaeger/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config-jaeger.yaml | 75 + .../templates/imxc-ui-server-jaeger.yaml | 63 + .../06-imxc-ui/imxc-ui-jaeger/values.yaml | 94 + 
.../files/06-imxc-ui/imxc-ui-jspd/Chart.yaml | 5 + .../imxc-ui-jspd/scripts/init-api-server.sh | 16 + .../imxc-ui-jspd/scripts/init-auth-server.sh | 36 + .../imxc-ui-jspd/scripts/init-noti-server.sh | 14 + .../imxc-ui-jspd/scripts/init-resource.sh | 6 + .../06-imxc-ui/imxc-ui-jspd/scripts/init.json | 2148 ++++++++ .../templates/imxc-ui-config.yaml | 44 + .../templates/imxc-ui-server.yaml | 63 + .../files/06-imxc-ui/imxc-ui-jspd/values.yaml | 94 + ansible/01_old/roles/test/files/ip_change | 15 + ansible/01_old/roles/test/files/k8s_status | 86 + .../roles/test/files/postgres_check_data | 6 + ansible/01_old/roles/test/files/rel_change | 15 + .../test/tasks/00-default-settings-master.yml | 30 + .../test/tasks/00-default-settings-node.yml | 27 + .../roles/test/tasks/01-storage-install.yml | 45 + .../roles/test/tasks/02-base-install.yml | 51 + .../01_old/roles/test/tasks/03-ddl-dml.yml | 59 + .../roles/test/tasks/04-keycloak-install.yml | 34 + .../roles/test/tasks/05-imxc-install.yml | 16 + .../roles/test/tasks/06-imxc-ui-install.yml | 112 + .../roles/test/tasks/07-keycloak-setting.yml | 90 + ansible/01_old/roles/test/tasks/08-finish.yml | 17 + .../01_old/roles/test/tasks/helm-install.yml | 60 + ansible/01_old/roles/test/tasks/main.yml | 4 + .../01_old/roles/test/templates/realm.json.j2 | 7 + ansible/01_old/roles/test/vars/main.yml | 7 + .../roles/zabbix-agent/defaults/main.yml | 292 + .../roles/zabbix-agent/files/sample.conf | 3 + .../files/win_sample/doSomething.ps1 | 0 .../roles/zabbix-agent/handlers/main.yml | 40 + .../01_old/roles/zabbix-agent/meta/main.yml | 42 + .../molecule/with-server/Dockerfile.j2 | 14 + .../molecule/with-server/INSTALL.rst | 26 + .../molecule/with-server/molecule.yml | 73 + .../molecule/with-server/playbook.yml | 24 + .../molecule/with-server/prepare.yml | 114 + .../molecule/with-server/requirements.yml | 5 + .../molecule/with-server/tests/test_agent.py | 44 + .../with-server/tests/test_default.py | 41 + .../roles/zabbix-agent/tasks/Debian.yml 
| 151 + .../roles/zabbix-agent/tasks/Docker.yml | 32 + .../01_old/roles/zabbix-agent/tasks/Linux.yml | 239 + .../roles/zabbix-agent/tasks/RedHat.yml | 70 + .../roles/zabbix-agent/tasks/Windows.yml | 352 ++ .../roles/zabbix-agent/tasks/Windows_conf.yml | 56 + .../roles/zabbix-agent/tasks/XCP-ng.yml | 70 + .../01_old/roles/zabbix-agent/tasks/api.yml | 96 + .../01_old/roles/zabbix-agent/tasks/macOS.yml | 22 + .../01_old/roles/zabbix-agent/tasks/main.yml | 94 + .../roles/zabbix-agent/tasks/remove.yml | 25 + .../roles/zabbix-agent/tasks/selinux.yml | 110 + .../roles/zabbix-agent/tasks/tlspsk_auto.yml | 14 + .../zabbix-agent/tasks/tlspsk_auto_agent2.yml | 14 + .../tasks/tlspsk_auto_agent2_common.yml | 53 + .../tasks/tlspsk_auto_agent2_linux.yml | 80 + .../tasks/tlspsk_auto_agent2_windows.yml | 66 + .../zabbix-agent/tasks/tlspsk_auto_common.yml | 52 + .../zabbix-agent/tasks/tlspsk_auto_linux.yml | 80 + .../tasks/tlspsk_auto_windows.yml | 67 + .../zabbix-agent/tasks/userparameter.yml | 87 + .../userparameters/dev2_iac_pass_failed.j2 | 1 + .../userparameters/dev2_pass_failed.j2 | 1 + .../templates/userparameters/mysql.j2 | 3 + .../userparameters/root_pass_failed.j2 | 1 + .../templates/userparameters/win_sample.j2 | 1 + .../templates/userparameters/zombie.j2 | 1 + .../templates/userparameters/zombielist.j2 | 1 + .../templates/zabbix_agent2.conf.j2 | 140 + .../templates/zabbix_agentd.conf.j2 | 149 + .../01_old/roles/zabbix-agent/vars/Debian.yml | 48 + .../01_old/roles/zabbix-agent/vars/RedHat.yml | 21 + .../roles/zabbix-agent/vars/Windows.yml | 7 + .../01_old/roles/zabbix-agent/vars/main.yml | 0 ansible/01_old/ssh_key/README.md | 9 + ansible/01_old/ssh_key/authorized_keys.yml | 11 + ansible/01_old/ssh_key/ip_list | 32 + ansible/01_old/ssh_key/key.sh | 8 + ansible/01_old/ssh_key/test.sh | 8 + ansible/01_old/std_inven | 4 + ansible/01_old/teleport | 26 + ansible/01_old/teleport.yml | 11 + ansible/01_old/teleport_aws.yml | 12 + ansible/01_old/zabbix-agent.yaml | 28 + 
ansible/README.md | 17 + ansible/infra_setting/ansible.cfg | 10 + ansible/infra_setting/infra-settings.yml | 19 + ansible/infra_setting/inventory | 31 + ansible/infra_setting/passwd_inventory | 76 + ansible/infra_setting/roles/.DS_Store | Bin 0 -> 6148 bytes .../roles/connect-settings/.DS_Store | Bin 0 -> 6148 bytes .../roles/connect-settings/README.md | 38 + .../roles/connect-settings/defaults/main.yml | 15 + .../files/00_old/gen_password.py | 44 + .../files/00_old/vault_test.py | 11 + .../roles/connect-settings/files/custom_excel | 108 + .../connect-settings/files/decrypt_password | 21 + .../roles/connect-settings/files/gen_password | 45 + .../roles/connect-settings/files/vault_get | 17 + .../roles/connect-settings/files/vault_put | 21 + .../roles/connect-settings/handlers/main.yml | 16 + .../roles/connect-settings/meta/main.yml | 52 + .../tasks/00_host_setting.yml | 162 + .../tasks/01_get_password.yml | 36 + .../tasks/02_change_password.yml | 21 + .../roles/connect-settings/tasks/03_vault.yml | 21 + .../tasks/04_excel_export.yml | 19 + .../tasks/99_decrypt_password.yml | 27 + .../roles/connect-settings/tasks/main.yml | 15 + .../connect-settings/templates/allow_users.j2 | 22 + .../roles/connect-settings/tests/inventory | 2 + .../roles/connect-settings/tests/test.yml | 5 + .../roles/connect-settings/vars/main.yml | 2 + ansible/teleport_setting/ansible.cfg | 10 + ansible/teleport_setting/restart.yml | 10 + ansible/teleport_setting/roles/.DS_Store | Bin 0 -> 6148 bytes .../teleport_setting/roles/teleport/.DS_Store | Bin 0 -> 6148 bytes .../teleport_setting/roles/teleport/README.md | 38 + .../roles/teleport/defaults/main.yml | 9 + .../roles/teleport/handlers/main.yml | 10 + .../roles/teleport/meta/main.yml | 52 + .../roles/teleport/tasks/main.yml | 33 + .../roles/teleport/tasks/teleport_install.yml | 25 + .../roles/teleport/tasks/teleport_remove.yml | 27 + .../roles/teleport/tasks/teleport_update.yml | 47 + .../teleport/templates/install-node.sh.j2 | 999 ++++ 
.../roles/teleport/templates/teleport.yaml.j2 | 35 + .../roles/teleport/vars/main.yml | 2 + ansible/teleport_setting/teleport | 65 + ansible/teleport_setting/teleport.yml | 11 + ansible/teleport_setting/teleport_aws.yml | 12 + ansible/zabbix_agent/ansible.cfg | 10 + ansible/zabbix_agent/inventory | 31 + .../roles/zabbix-agent/defaults/main.yml | 292 + .../roles/zabbix-agent/files/sample.conf | 3 + .../files/win_sample/doSomething.ps1 | 0 .../roles/zabbix-agent/handlers/main.yml | 40 + .../roles/zabbix-agent/meta/main.yml | 42 + .../molecule/with-server/Dockerfile.j2 | 14 + .../molecule/with-server/INSTALL.rst | 26 + .../molecule/with-server/molecule.yml | 73 + .../molecule/with-server/playbook.yml | 24 + .../molecule/with-server/prepare.yml | 114 + .../molecule/with-server/requirements.yml | 5 + .../molecule/with-server/tests/test_agent.py | 44 + .../with-server/tests/test_default.py | 41 + .../roles/zabbix-agent/tasks/Debian.yml | 151 + .../roles/zabbix-agent/tasks/Docker.yml | 32 + .../roles/zabbix-agent/tasks/Linux.yml | 239 + .../roles/zabbix-agent/tasks/RedHat.yml | 70 + .../roles/zabbix-agent/tasks/Windows.yml | 352 ++ .../roles/zabbix-agent/tasks/Windows_conf.yml | 56 + .../roles/zabbix-agent/tasks/XCP-ng.yml | 70 + .../roles/zabbix-agent/tasks/api.yml | 96 + .../roles/zabbix-agent/tasks/macOS.yml | 22 + .../roles/zabbix-agent/tasks/main.yml | 94 + .../roles/zabbix-agent/tasks/remove.yml | 25 + .../roles/zabbix-agent/tasks/selinux.yml | 110 + .../roles/zabbix-agent/tasks/tlspsk_auto.yml | 14 + .../zabbix-agent/tasks/tlspsk_auto_agent2.yml | 14 + .../tasks/tlspsk_auto_agent2_common.yml | 53 + .../tasks/tlspsk_auto_agent2_linux.yml | 80 + .../tasks/tlspsk_auto_agent2_windows.yml | 66 + .../zabbix-agent/tasks/tlspsk_auto_common.yml | 52 + .../zabbix-agent/tasks/tlspsk_auto_linux.yml | 80 + .../tasks/tlspsk_auto_windows.yml | 67 + .../zabbix-agent/tasks/userparameter.yml | 87 + .../userparameters/dev2_iac_pass_failed.j2 | 1 + 
.../userparameters/dev2_pass_failed.j2 | 1 + .../templates/userparameters/mysql.j2 | 3 + .../userparameters/root_pass_failed.j2 | 1 + .../templates/userparameters/win_sample.j2 | 1 + .../templates/userparameters/zombie.j2 | 1 + .../templates/userparameters/zombielist.j2 | 1 + .../templates/zabbix_agent2.conf.j2 | 140 + .../templates/zabbix_agentd.conf.j2 | 149 + .../roles/zabbix-agent/vars/Debian.yml | 48 + .../roles/zabbix-agent/vars/RedHat.yml | 21 + .../roles/zabbix-agent/vars/Windows.yml | 7 + .../roles/zabbix-agent/vars/main.yml | 0 ansible/zabbix_agent/zabbix-agent.yaml | 28 + 2610 files changed, 281893 insertions(+) create mode 100644 ansible/00_old/agent_cluster_install.yaml create mode 100644 ansible/00_old/agent_datasaker.yml create mode 100644 ansible/00_old/api_cluster_install.yaml create mode 100644 ansible/00_old/authorized_keys.yml create mode 100644 ansible/00_old/bastion.yml create mode 100644 ansible/00_old/cmoa_install.yaml create mode 100644 ansible/00_old/datasaker.yml create mode 100644 ansible/00_old/dev_datasaker.yml create mode 100644 ansible/00_old/get-docker.sh create mode 100644 ansible/00_old/health_check.yml create mode 100644 ansible/00_old/install-centos-node.sh create mode 100644 ansible/00_old/install-node-ubuntu.sh create mode 100644 ansible/00_old/install-node.sh create mode 100644 ansible/00_old/installer.sh create mode 100755 ansible/00_old/key_test.sh create mode 100644 ansible/00_old/local_datasaker.yml create mode 100644 ansible/00_old/node_role.yaml create mode 100755 ansible/00_old/roles.yaml create mode 100644 ansible/00_old/test.yml create mode 100644 ansible/00_old/vault_test.yaml create mode 100644 ansible/01_old/README.md create mode 100755 ansible/01_old/all_host create mode 100755 ansible/01_old/ansible.cfg create mode 100644 ansible/01_old/ansible_kubectl/Dockerfile create mode 100644 ansible/01_old/ansible_kubectl/kubeconfig create mode 100644 ansible/01_old/ansible_kubectl/testpod.yaml create mode 100644 
ansible/01_old/infra-settings.yml create mode 100644 ansible/01_old/infra-test create mode 100644 ansible/01_old/inventory/.DS_Store create mode 100755 ansible/01_old/inventory/agent-team create mode 100755 ansible/01_old/inventory/all_host create mode 100755 ansible/01_old/inventory/api-team create mode 100644 ansible/01_old/inventory/aws create mode 100755 ansible/01_old/inventory/cmoa create mode 100755 ansible/01_old/inventory/dsk-dev create mode 100755 ansible/01_old/inventory/etc create mode 100755 ansible/01_old/inventory/infra-team create mode 100644 ansible/01_old/inventory/passwd_inventory create mode 100755 ansible/01_old/inventory/teleport create mode 100644 ansible/01_old/inventory/teleport_test create mode 100755 ansible/01_old/inventory/test_node create mode 100755 ansible/01_old/inventory/zabbix_inventory create mode 100755 ansible/01_old/inventory2 create mode 100755 ansible/01_old/inventory_agent create mode 100755 ansible/01_old/inventory_api create mode 100755 ansible/01_old/inventory_bak create mode 100755 ansible/01_old/inventory_cent create mode 100644 ansible/01_old/inventory_dsk_dev create mode 100644 ansible/01_old/inventory_test create mode 100644 ansible/01_old/inventory_tmp_cluster create mode 100644 ansible/01_old/password_change.yml create mode 100644 ansible/01_old/password_change.yml_bak create mode 100644 ansible/01_old/restart.yml create mode 100644 ansible/01_old/roles/.DS_Store create mode 100644 ansible/01_old/roles/agent_os_setting/README.md create mode 100644 ansible/01_old/roles/agent_os_setting/defaults/main.yml create mode 100755 ansible/01_old/roles/agent_os_setting/files/get-docker.sh create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/.helmignore create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/CHANGELOG.md create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/Chart.yaml create mode 100644 
ansible/01_old/roles/agent_os_setting/files/ingress-nginx/OWNERS create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md.gotmpl create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml create mode 100644 
ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml create mode 100644 
ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/override-values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp2.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/NOTES.txt create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_params.tpl create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml create mode 100644 
ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-deployment.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-hpa.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-keda.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml create 
mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-psp.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-role.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-role.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-service.yaml create mode 100644 
ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/files/ingress-nginx/values.yaml create mode 100644 ansible/01_old/roles/agent_os_setting/handlers/main.yml create mode 100644 ansible/01_old/roles/agent_os_setting/meta/main.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/00-centos-os-main.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/00-ubuntu-os-main.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-containerd.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-crio.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-docker.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-containerd.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-crio.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-docker.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/02-k8s-main.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/03-k8s-master.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml_bak create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/05-k8s-node.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/06-worker-directory.yml create mode 100644 ansible/01_old/roles/agent_os_setting/tasks/main.yml create mode 100644 ansible/01_old/roles/agent_os_setting/templates/calico.yaml.j2 create mode 100644 ansible/01_old/roles/agent_os_setting/templates/components.yaml.j2 create mode 100644 ansible/01_old/roles/agent_os_setting/templates/config.toml.j2 create mode 100644 
ansible/01_old/roles/agent_os_setting/templates/daemon.json.j2 create mode 100644 ansible/01_old/roles/agent_os_setting/templates/hosts.j2 create mode 100644 ansible/01_old/roles/agent_os_setting/templates/myregistry.conf.j2 create mode 100644 ansible/01_old/roles/agent_os_setting/templates/yaml2toml_macro.j2 create mode 100644 ansible/01_old/roles/agent_os_setting/tests/inventory create mode 100644 ansible/01_old/roles/agent_os_setting/tests/test.yml create mode 100644 ansible/01_old/roles/agent_os_setting/vars/main.yml create mode 100644 ansible/01_old/roles/api_os_setting/defaults/main.yml create mode 100755 ansible/01_old/roles/api_os_setting/files/get-docker.sh create mode 100644 ansible/01_old/roles/api_os_setting/handlers/main.yml create mode 100644 ansible/01_old/roles/api_os_setting/meta/main.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/00-centos-os-main.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/00-ubuntu-os-main.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/01-centos-os-containerd.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/01-centos-os-crio.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/01-centos-os-docker.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-containerd.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-crio.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-docker.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/02-k8s-main.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/03-k8s-master.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/04-k8s-master-yaml.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/05-k8s-node.yml create mode 100644 ansible/01_old/roles/api_os_setting/tasks/main.yml create mode 100644 ansible/01_old/roles/api_os_setting/templates/calico.yaml.j2 create mode 100644 
ansible/01_old/roles/api_os_setting/templates/components.yaml.j2 create mode 100644 ansible/01_old/roles/api_os_setting/templates/config.toml.j2 create mode 100644 ansible/01_old/roles/api_os_setting/templates/daemon.json.j2 create mode 100644 ansible/01_old/roles/api_os_setting/templates/hosts.j2 create mode 100644 ansible/01_old/roles/api_os_setting/templates/myregistry.conf.j2 create mode 100644 ansible/01_old/roles/api_os_setting/templates/yaml2toml_macro.j2 create mode 100644 ansible/01_old/roles/api_os_setting/tests/inventory create mode 100644 ansible/01_old/roles/api_os_setting/tests/test.yml create mode 100644 ansible/01_old/roles/api_os_setting/vars/main.yml create mode 100755 ansible/01_old/roles/bastion/defaults/main.yml create mode 100755 ansible/01_old/roles/bastion/files/login_banner create mode 100755 ansible/01_old/roles/bastion/handlers/main.yml create mode 100755 ansible/01_old/roles/bastion/tasks/admin_set.yml create mode 100755 ansible/01_old/roles/bastion/tasks/banner.yml create mode 100755 ansible/01_old/roles/bastion/tasks/crictl.yml create mode 100755 ansible/01_old/roles/bastion/tasks/login_defs.yml create mode 100755 ansible/01_old/roles/bastion/tasks/main.yml create mode 100755 ansible/01_old/roles/bastion/tasks/pam.yml create mode 100755 ansible/01_old/roles/bastion/tasks/profile.yml create mode 100755 ansible/01_old/roles/bastion/tasks/sshd_config.yml create mode 100755 ansible/01_old/roles/bastion/tasks/sudoers.yml create mode 100755 ansible/01_old/roles/bastion/templates/allow_users.j2 create mode 100755 ansible/01_old/roles/bastion/templates/common-auth.j2 create mode 100755 ansible/01_old/roles/bastion/templates/pwquality.conf.j2 create mode 100755 ansible/01_old/roles/bastion/templates/sudoers_users.j2 create mode 100644 ansible/01_old/roles/cmoa_demo_install/defaults/main.yml create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/00-default/sa_patch.sh create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/01-storage/cmoa_minio create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/01-coredns.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml 
create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml 
create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml create mode 
100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml create mode 
100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore create mode 
100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/index.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/02-base/base/values.yaml create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt create mode 
100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/OWNERS create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/README.md create 
mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.lock create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml create 
mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/05-imxc/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml create mode 100644 ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/ip_change create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/k8s_status create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/postgres_check_data create mode 100755 ansible/01_old/roles/cmoa_demo_install/files/rel_change create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-master.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-node.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/01-storage-install.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/02-base-install.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/03-ddl-dml.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/04-keycloak-install.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/05-imxc-install.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml create mode 100644 
ansible/01_old/roles/cmoa_demo_install/tasks/08-finish.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/helm-install.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/tasks/main.yml create mode 100644 ansible/01_old/roles/cmoa_demo_install/templates/realm.json.j2 create mode 100644 ansible/01_old/roles/cmoa_demo_install/vars/main.yml create mode 100644 ansible/01_old/roles/cmoa_install/defaults/main.yml create mode 100755 ansible/01_old/roles/cmoa_install/files/00-default/sa_patch.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/00-default/secret_dockerhub.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/00-default/secret_nexus.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/00-storageclass.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml create mode 100755 ansible/01_old/roles/cmoa_install/files/01-storage/cmoa_minio create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl create mode 100644 
ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/service.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/01-storage/minio/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/01-coredns.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml 
create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/index.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/02-base/base/values.yaml create mode 100755 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh create mode 100644 
ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql create mode 100644 
ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql create mode 100644 
ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/OWNERS create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl 
create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.lock create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml create 
mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/route.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install/files/04-keycloak/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml create mode 100644 
ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/05-imxc/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh 
create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml create mode 100755 ansible/01_old/roles/cmoa_install/files/ip_change create mode 100755 ansible/01_old/roles/cmoa_install/files/k8s_status create mode 100755 ansible/01_old/roles/cmoa_install/files/postgres_check_data create mode 100755 ansible/01_old/roles/cmoa_install/files/rel_change create mode 100644 ansible/01_old/roles/cmoa_install/tasks/00-default-settings-master.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/00-default-settings-node.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/01-storage-install.yml create mode 100644 
ansible/01_old/roles/cmoa_install/tasks/02-base-install.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/03-ddl-dml.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/04-keycloak-install.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/05-imxc-install.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/06-imxc-ui-install.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/07-keycloak-setting.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/08-finish.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/helm-install.yml create mode 100644 ansible/01_old/roles/cmoa_install/tasks/main.yml create mode 100644 ansible/01_old/roles/cmoa_install/templates/realm.json.j2 create mode 100644 ansible/01_old/roles/cmoa_install/vars/main.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/defaults/main.yml create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/00-default/sa_patch.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_dockerhub.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_nexus.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/00-storageclass.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/01-persistentvolume.yaml create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/01-storage/cmoa_minio create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_bucket.txt create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_policy.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_user.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_custom_command.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_policy.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/gateway-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-bucket-job.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-policy-job.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-user-job.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-custom-command.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/securitycontextconstraints.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/00-kafka-broker-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/01-coredns.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.lock create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/cortex-pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml 
create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/node-exporter.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/2.service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/.helmignore create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/1.broker-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/2.dns.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/5.kafka.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/6.outside.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/configuration.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pdb.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.lock create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_names.tpl create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/extra-flags-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap-scripts.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/headless-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/health-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/pdb.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/psp.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-role.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/secret.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/0.config.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/values.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/02-base/base/index.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/02-base/base/values.yaml create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/es-ddl-put.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_ddl.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_dml.psql create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/OWNERS create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl create mode 
100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/conf.d/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.lock create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/pv.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/role.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-read.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values-production.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/h2-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/postgres-ha-values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.lock create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/scripts/keycloak.cli create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/configmap-startup.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/ingress.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/prometheusrule.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/rbac.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/route.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/secrets.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-headless.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-http.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/configmap-test.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/pod-test.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.schema.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/auth-server.yaml create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-datagate.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-agent.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-collector.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-batch.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-connector.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-flat.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/eureka-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-api-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-collector.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/noti-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/streams-depl.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/topology-agent.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/zuul-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/05-imxc/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh create mode 100644 
ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/values.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml create mode 100644 ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/values.yaml create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/ip_change create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/k8s_status create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/postgres_check_data create mode 100755 ansible/01_old/roles/cmoa_install_bak/files/rel_change create mode 100644 
ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-master.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-node.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/01-storage-install.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/02-base-install.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/03-ddl-dml.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/04-keycloak-install.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/05-imxc-install.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/06-imxc-ui-install.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/07-keycloak-setting.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/08-finish.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/helm-install.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/tasks/main.yml create mode 100644 ansible/01_old/roles/cmoa_install_bak/templates/realm.json.j2 create mode 100644 ansible/01_old/roles/cmoa_install_bak/vars/main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/README.md create mode 100644 ansible/01_old/roles/cmoa_os_setting/defaults/main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/.helmignore create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/OWNERS create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml create mode 100644 
ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml create mode 100644 
ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml create mode 100644 
ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml create mode 100644 
ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml create mode 100644 
ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml create mode 100644 ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/values.yaml create mode 100644 
ansible/01_old/roles/cmoa_os_setting/handlers/main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/meta/main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/00-centos-os-main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/02-k8s-main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/03-k8s-master.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/05-k8s-node.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/06-worker-directory.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/tasks/main.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/templates/config.toml.j2 create mode 100644 ansible/01_old/roles/cmoa_os_setting/templates/hosts.j2 create mode 100644 ansible/01_old/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 create mode 100644 ansible/01_old/roles/cmoa_os_setting/tests/inventory create mode 100644 ansible/01_old/roles/cmoa_os_setting/tests/test.yml create mode 100644 ansible/01_old/roles/cmoa_os_setting/vars/main.yml create mode 100644 ansible/01_old/roles/connect-settings/README.md create mode 100644 ansible/01_old/roles/connect-settings/defaults/main.yml create mode 100644 ansible/01_old/roles/connect-settings/files/00_old/gen_password.py create mode 100644 ansible/01_old/roles/connect-settings/files/00_old/vault_test.py create mode 100755 ansible/01_old/roles/connect-settings/files/custom_excel create mode 100755 ansible/01_old/roles/connect-settings/files/decrypt_password create mode 100755 ansible/01_old/roles/connect-settings/files/gen_password create mode 100755 
ansible/01_old/roles/connect-settings/files/vault_get create mode 100755 ansible/01_old/roles/connect-settings/files/vault_put create mode 100644 ansible/01_old/roles/connect-settings/handlers/main.yml create mode 100644 ansible/01_old/roles/connect-settings/meta/main.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/00_host_setting.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/01_get_password.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/02_change_password.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/03_vault.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/04_excel_export.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/99_decrypt_password.yml create mode 100644 ansible/01_old/roles/connect-settings/tasks/main.yml create mode 100755 ansible/01_old/roles/connect-settings/templates/allow_users.j2 create mode 100644 ansible/01_old/roles/connect-settings/tests/inventory create mode 100644 ansible/01_old/roles/connect-settings/tests/test.yml create mode 100644 ansible/01_old/roles/connect-settings/vars/main.yml create mode 100644 ansible/01_old/roles/datadog.datadog/.circleci/config.yml create mode 100644 ansible/01_old/roles/datadog.datadog/.github/CODEOWNERS create mode 100644 ansible/01_old/roles/datadog.datadog/.gitignore create mode 100644 ansible/01_old/roles/datadog.datadog/CHANGELOG.md create mode 100644 ansible/01_old/roles/datadog.datadog/CONTRIBUTING.md create mode 100644 ansible/01_old/roles/datadog.datadog/LICENSE create mode 100644 ansible/01_old/roles/datadog.datadog/README.md create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/downgrade_to_5.yaml create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/install_agent_5.yaml create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6.yaml create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6_macos.yaml create mode 100644 
ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7.yaml create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7_macos.yaml create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci.ini create mode 100644 ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci_macos.ini create mode 100644 ansible/01_old/roles/datadog.datadog/defaults/main.yml create mode 100644 ansible/01_old/roles/datadog.datadog/handlers/main-macos.yml create mode 100644 ansible/01_old/roles/datadog.datadog/handlers/main-win.yml create mode 100644 ansible/01_old/roles/datadog.datadog/handlers/main.yml create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/.gitignore create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/Vagrantfile create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/inventory create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/inventory_win create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/readme.md create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/test_5_default.yml create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/test_5_full.yml create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/test_6_default.yml create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/test_6_full.yml create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/test_7_default.yml create mode 100644 ansible/01_old/roles/datadog.datadog/manual_tests/test_7_full.yml create mode 100644 ansible/01_old/roles/datadog.datadog/meta/.galaxy_install_info create mode 100644 ansible/01_old/roles/datadog.datadog/meta/main.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/_agent-linux-macos-shared.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/_apt-key-import.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/_remove_rpm_keys.yml create mode 100644 
ansible/01_old/roles/datadog.datadog/tasks/agent-linux.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/agent-macos.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/agent-win.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/agent5-linux.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/check-removed-config.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/facts-ansible10.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/facts-ansible9.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/integration.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/main.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/os-check.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/parse-version-macos.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/parse-version-windows.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/parse-version.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-debian.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-latest.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-pinned.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-macos.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_latest.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_version.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-latest.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-pinned.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-suse.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-latest.yml create mode 100644 
ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-pinned.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-windows-opts.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/pkg-windows.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/sanitize-checks.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/set-parse-version.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/win_agent_latest.yml create mode 100644 ansible/01_old/roles/datadog.datadog/tasks/win_agent_version.yml create mode 100644 ansible/01_old/roles/datadog.datadog/templates/checks.yaml.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/com.datadoghq.agent.plist.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/datadog.conf.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/datadog.yaml.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/install_info.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/security-agent.yaml.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/system-probe.yaml.j2 create mode 100644 ansible/01_old/roles/datadog.datadog/templates/zypper.repo.j2 create mode 100644 ansible/01_old/roles/datasaker/README.md create mode 100644 ansible/01_old/roles/datasaker/defaults/main.yml create mode 100644 ansible/01_old/roles/datasaker/handlers/main.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/check-agent.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/dsk-common.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/dsk-debian-pkg.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/dsk-log-agent.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/dsk-node-agent.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/dsk-plan-postgres-agent.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/dsk-trace-agent.yml create mode 100644 
ansible/01_old/roles/datasaker/tasks/gather-facts.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/main copy.yml create mode 100644 ansible/01_old/roles/datasaker/tasks/main.yml create mode 100644 ansible/01_old/roles/datasaker/templates/global-config.yml.j2 create mode 100644 ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2_bak create mode 100644 ansible/01_old/roles/datasaker/templates/node-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/datasaker/templates/plan-postgres-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/datasaker/templates/trace-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/README.md create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/README_ko.md create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/defaults/main.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/files/libpq-13.5-1.el8.x86_64.rpm create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/files/libpq-devel-13.5-1.el8.x86_64.rpm create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/handlers/main.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/meta/.galaxy_install_info create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/meta/main.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/check-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-common.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-debian-pkg.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-log-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-node-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-postgres-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-trace-agent.yml create mode 100644 
ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-log-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-node-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-plan-postgres-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-postgres-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-redhat-pkg.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-trace-agent.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/gather-facts.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/main.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/permissions.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/tasks/remove-datasaker.yml create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/docker-log-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/docker-plan-postgres-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/docker-postgres-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/fluent-bit-repo.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/global-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2_bak create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/node-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/plan-postgres-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/postgres-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/dsk_bot.datasaker/templates/trace-agent-config.yml.j2 create mode 100644 ansible/01_old/roles/kubernetes_install/README.md create mode 100644 
ansible/01_old/roles/kubernetes_install/defaults/main.yml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/.helmignore create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/Chart.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/OWNERS create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml create mode 100644 
ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml create mode 100644 
ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/override-values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp2.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 
ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml create mode 100644 
ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml create mode 100644 
ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/ingress-nginx/values.yaml create mode 100644 ansible/01_old/roles/kubernetes_install/files/kubeconfig create mode 100644 ansible/01_old/roles/kubernetes_install/handlers/main.yml create mode 100644 ansible/01_old/roles/kubernetes_install/meta/main.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/helm-chart-nginx.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/helm-install.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/k8s-helm-chart.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/k8s-main.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/k8s-master.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/k8s-node.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/main.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/os-main.yml create mode 100644 ansible/01_old/roles/kubernetes_install/tasks/os-runtime.yml create mode 100644 ansible/01_old/roles/kubernetes_install/templates/config.toml.j2 create mode 100644 
ansible/01_old/roles/kubernetes_install/templates/hosts.j2 create mode 100644 ansible/01_old/roles/kubernetes_install/templates/yaml2toml_macro.j2 create mode 100644 ansible/01_old/roles/kubernetes_install/tests/inventory create mode 100644 ansible/01_old/roles/kubernetes_install/tests/test.yml create mode 100644 ansible/01_old/roles/kubernetes_install/vars/main.yml create mode 100644 ansible/01_old/roles/node/tasks/main.yml create mode 100755 ansible/01_old/roles/node/templates/common-auth.j2 create mode 100755 ansible/01_old/roles/node/templates/pwquality.conf.j2 create mode 100644 ansible/01_old/roles/node/templates/sysctl.j2 create mode 100644 ansible/01_old/roles/password_change/README.md create mode 100644 ansible/01_old/roles/password_change/defaults/main.yml create mode 100644 ansible/01_old/roles/password_change/files/00_old/gen_password.py create mode 100644 ansible/01_old/roles/password_change/files/00_old/vault_test.py create mode 100755 ansible/01_old/roles/password_change/files/custom_excel create mode 100755 ansible/01_old/roles/password_change/files/decrypt_password create mode 100755 ansible/01_old/roles/password_change/files/gen_password create mode 100755 ansible/01_old/roles/password_change/files/vault_get create mode 100755 ansible/01_old/roles/password_change/files/vault_put create mode 100644 ansible/01_old/roles/password_change/handlers/main.yml create mode 100644 ansible/01_old/roles/password_change/meta/main.yml create mode 100644 ansible/01_old/roles/password_change/tasks/01_get_password.yml create mode 100644 ansible/01_old/roles/password_change/tasks/02_change_password.yml create mode 100644 ansible/01_old/roles/password_change/tasks/03_vault.yml create mode 100644 ansible/01_old/roles/password_change/tasks/04_excel_export.yml create mode 100644 ansible/01_old/roles/password_change/tasks/99_decrypt_password.yml create mode 100644 ansible/01_old/roles/password_change/tasks/main.yml create mode 100755 
ansible/01_old/roles/password_change/templates/allow_users.j2 create mode 100644 ansible/01_old/roles/password_change/tests/inventory create mode 100644 ansible/01_old/roles/password_change/tests/test.yml create mode 100644 ansible/01_old/roles/password_change/vars/main.yml create mode 100644 ansible/01_old/roles/security-settings/.DS_Store create mode 100755 ansible/01_old/roles/security-settings/defaults/main.yml create mode 100755 ansible/01_old/roles/security-settings/files/login_banner create mode 100755 ansible/01_old/roles/security-settings/handlers/main.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/admin_set.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/banner.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/crictl.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/login_defs.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/main.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/pam.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/profile.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/sshd_config.yml create mode 100755 ansible/01_old/roles/security-settings/tasks/sudoers.yml create mode 100755 ansible/01_old/roles/security-settings/templates/allow_users.j2 create mode 100755 ansible/01_old/roles/security-settings/templates/common-auth.j2 create mode 100755 ansible/01_old/roles/security-settings/templates/pwquality.conf.j2 create mode 100755 ansible/01_old/roles/security-settings/templates/sudoers_users.j2 create mode 100644 ansible/01_old/roles/teleport/.DS_Store create mode 100644 ansible/01_old/roles/teleport/README.md create mode 100644 ansible/01_old/roles/teleport/defaults/main.yml create mode 100644 ansible/01_old/roles/teleport/handlers/main.yml create mode 100644 ansible/01_old/roles/teleport/meta/main.yml create mode 100644 ansible/01_old/roles/teleport/tasks/main.yml create mode 100644 
ansible/01_old/roles/teleport/tasks/teleport_install.yml create mode 100644 ansible/01_old/roles/teleport/tasks/teleport_remove.yml create mode 100644 ansible/01_old/roles/teleport/tasks/teleport_update.yml create mode 100644 ansible/01_old/roles/teleport/templates/install-node.sh.j2 create mode 100644 ansible/01_old/roles/teleport/templates/teleport.yaml.j2 create mode 100644 ansible/01_old/roles/teleport/vars/main.yml create mode 100644 ansible/01_old/roles/test/defaults/main.yml create mode 100755 ansible/01_old/roles/test/files/00-default/sa_patch.sh create mode 100644 ansible/01_old/roles/test/files/00-default/secret_dockerhub.yaml create mode 100644 ansible/01_old/roles/test/files/00-default/secret_nexus.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/00-storageclass.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/01-persistentvolume.yaml create mode 100755 ansible/01_old/roles/test/files/01-storage/cmoa_minio create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/.helmignore create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/README.md create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/NOTES.txt create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_bucket.txt create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_policy.txt create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_user.txt create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_custom_command.txt create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_policy.tpl create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/configmap.yaml create 
mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/console-ingress.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/console-service.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/deployment.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/gateway-deployment.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/ingress.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-bucket-job.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-policy-job.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-user-job.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-custom-command.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/pvc.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/secrets.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/securitycontextconstraints.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/service.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/01-storage/minio/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/00-kafka-broker-config.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/01-coredns.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/analysis/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/analysis/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/analysis/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.lock create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/README.md create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/NOTES.txt create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrole.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml 
create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/cortex-pv.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/node-exporter.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/cortex/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/2.service.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/1.broker-config.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/2.dns.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/5.kafka.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/6.outside.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/kafka/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/postgres/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.lock create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/README.md create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/README.md create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/default-values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/NOTES.txt create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/configuration.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/ingress.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pdb.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pv.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pvc.yaml create mode 
100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/role.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/secrets.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.schema.json create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.lock create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/README.md create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/README.md create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl create mode 100644 
ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/values.yaml create 
mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/default-values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/extra-flags-values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/NOTES.txt create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap-scripts.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/headless-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/health-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/pdb.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/psp.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-pv.yaml 
create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-role.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/secret.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/values.schema.json create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/redis/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/.helmignore create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/0.config.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/values.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/index.yaml create mode 100644 
ansible/01_old/roles/test/files/02-base/base/templates/role.yaml create mode 100644 ansible/01_old/roles/test/files/02-base/base/values.yaml create mode 100755 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/es-ddl-put.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 
ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 
ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql create mode 100644 ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_ddl.psql create mode 100644 
ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_dml.psql create mode 100644 ansible/01_old/roles/test/files/04-keycloak/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/OWNERS create mode 100644 ansible/01_old/roles/test/files/04-keycloak/README.md create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/.helmignore create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/README.md create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/README.md create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 
ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/default-values.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/README.md create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/conf.d/README.md create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.lock create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/configmap.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml create mode 100644 
ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/pv.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/role.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-read.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values-production.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.schema.json create mode 100644 ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/ci/h2-values.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/ci/postgres-ha-values.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/requirements.lock create mode 100644 ansible/01_old/roles/test/files/04-keycloak/requirements.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/scripts/keycloak.cli create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/NOTES.txt create mode 100644 
ansible/01_old/roles/test/files/04-keycloak/templates/_helpers.tpl create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/configmap-startup.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/hpa.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/ingress.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/networkpolicy.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/poddisruptionbudget.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/prometheusrule.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/rbac.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/route.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/secrets.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/service-headless.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/service-http.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/serviceaccount.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/servicemonitor.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/statefulset.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/test/configmap-test.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/templates/test/pod-test.yaml create mode 100644 ansible/01_old/roles/test/files/04-keycloak/values.schema.json create mode 100644 ansible/01_old/roles/test/files/04-keycloak/values.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/test/files/05-imxc/scripts/init-auth-server.sh 
create mode 100644 ansible/01_old/roles/test/files/05-imxc/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/test/files/05-imxc/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/test/files/05-imxc/scripts/init.json create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/auth-server.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-datagate.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-agent.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-collector.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-batch.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-connector.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-flat.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/eureka-server.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/imxc-api-server.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/imxc-collector.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/noti-server.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/streams-depl.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/topology-agent.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/templates/zuul-server.yaml create mode 100644 ansible/01_old/roles/test/files/05-imxc/values.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh create mode 100644 
ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/values.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml create mode 100644 ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/values.yaml create mode 100755 ansible/01_old/roles/test/files/ip_change create mode 100755 ansible/01_old/roles/test/files/k8s_status create mode 100755 ansible/01_old/roles/test/files/postgres_check_data create mode 100755 ansible/01_old/roles/test/files/rel_change create mode 100644 ansible/01_old/roles/test/tasks/00-default-settings-master.yml create mode 100644 
ansible/01_old/roles/test/tasks/00-default-settings-node.yml create mode 100644 ansible/01_old/roles/test/tasks/01-storage-install.yml create mode 100644 ansible/01_old/roles/test/tasks/02-base-install.yml create mode 100644 ansible/01_old/roles/test/tasks/03-ddl-dml.yml create mode 100644 ansible/01_old/roles/test/tasks/04-keycloak-install.yml create mode 100644 ansible/01_old/roles/test/tasks/05-imxc-install.yml create mode 100644 ansible/01_old/roles/test/tasks/06-imxc-ui-install.yml create mode 100644 ansible/01_old/roles/test/tasks/07-keycloak-setting.yml create mode 100644 ansible/01_old/roles/test/tasks/08-finish.yml create mode 100644 ansible/01_old/roles/test/tasks/helm-install.yml create mode 100644 ansible/01_old/roles/test/tasks/main.yml create mode 100644 ansible/01_old/roles/test/templates/realm.json.j2 create mode 100644 ansible/01_old/roles/test/vars/main.yml create mode 100644 ansible/01_old/roles/zabbix-agent/defaults/main.yml create mode 100644 ansible/01_old/roles/zabbix-agent/files/sample.conf create mode 100644 ansible/01_old/roles/zabbix-agent/files/win_sample/doSomething.ps1 create mode 100644 ansible/01_old/roles/zabbix-agent/handlers/main.yml create mode 100644 ansible/01_old/roles/zabbix-agent/meta/main.yml create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/INSTALL.rst create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/molecule.yml create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/playbook.yml create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/prepare.yml create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/requirements.yml create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_agent.py create mode 100644 ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_default.py create mode 100644 
ansible/01_old/roles/zabbix-agent/tasks/Debian.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/Docker.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/Linux.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/RedHat.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/Windows.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/Windows_conf.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/XCP-ng.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/api.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/macOS.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/main.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/remove.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/selinux.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_common.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml create mode 100644 ansible/01_old/roles/zabbix-agent/tasks/userparameter.yml create mode 100644 ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/userparameters/mysql.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 create mode 100644 
ansible/01_old/roles/zabbix-agent/templates/userparameters/win_sample.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/userparameters/zombie.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/userparameters/zombielist.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 create mode 100644 ansible/01_old/roles/zabbix-agent/vars/Debian.yml create mode 100644 ansible/01_old/roles/zabbix-agent/vars/RedHat.yml create mode 100644 ansible/01_old/roles/zabbix-agent/vars/Windows.yml create mode 100644 ansible/01_old/roles/zabbix-agent/vars/main.yml create mode 100644 ansible/01_old/ssh_key/README.md create mode 100644 ansible/01_old/ssh_key/authorized_keys.yml create mode 100644 ansible/01_old/ssh_key/ip_list create mode 100755 ansible/01_old/ssh_key/key.sh create mode 100755 ansible/01_old/ssh_key/test.sh create mode 100644 ansible/01_old/std_inven create mode 100755 ansible/01_old/teleport create mode 100644 ansible/01_old/teleport.yml create mode 100644 ansible/01_old/teleport_aws.yml create mode 100644 ansible/01_old/zabbix-agent.yaml create mode 100644 ansible/README.md create mode 100755 ansible/infra_setting/ansible.cfg create mode 100644 ansible/infra_setting/infra-settings.yml create mode 100644 ansible/infra_setting/inventory create mode 100644 ansible/infra_setting/passwd_inventory create mode 100644 ansible/infra_setting/roles/.DS_Store create mode 100644 ansible/infra_setting/roles/connect-settings/.DS_Store create mode 100644 ansible/infra_setting/roles/connect-settings/README.md create mode 100644 ansible/infra_setting/roles/connect-settings/defaults/main.yml create mode 100644 ansible/infra_setting/roles/connect-settings/files/00_old/gen_password.py create mode 100644 ansible/infra_setting/roles/connect-settings/files/00_old/vault_test.py create mode 100755 
ansible/infra_setting/roles/connect-settings/files/custom_excel create mode 100755 ansible/infra_setting/roles/connect-settings/files/decrypt_password create mode 100755 ansible/infra_setting/roles/connect-settings/files/gen_password create mode 100755 ansible/infra_setting/roles/connect-settings/files/vault_get create mode 100755 ansible/infra_setting/roles/connect-settings/files/vault_put create mode 100644 ansible/infra_setting/roles/connect-settings/handlers/main.yml create mode 100644 ansible/infra_setting/roles/connect-settings/meta/main.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/00_host_setting.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/01_get_password.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/02_change_password.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/03_vault.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/04_excel_export.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/99_decrypt_password.yml create mode 100644 ansible/infra_setting/roles/connect-settings/tasks/main.yml create mode 100755 ansible/infra_setting/roles/connect-settings/templates/allow_users.j2 create mode 100644 ansible/infra_setting/roles/connect-settings/tests/inventory create mode 100644 ansible/infra_setting/roles/connect-settings/tests/test.yml create mode 100644 ansible/infra_setting/roles/connect-settings/vars/main.yml create mode 100755 ansible/teleport_setting/ansible.cfg create mode 100644 ansible/teleport_setting/restart.yml create mode 100644 ansible/teleport_setting/roles/.DS_Store create mode 100644 ansible/teleport_setting/roles/teleport/.DS_Store create mode 100644 ansible/teleport_setting/roles/teleport/README.md create mode 100644 ansible/teleport_setting/roles/teleport/defaults/main.yml create mode 100644 ansible/teleport_setting/roles/teleport/handlers/main.yml create mode 100644 
ansible/teleport_setting/roles/teleport/meta/main.yml create mode 100644 ansible/teleport_setting/roles/teleport/tasks/main.yml create mode 100644 ansible/teleport_setting/roles/teleport/tasks/teleport_install.yml create mode 100644 ansible/teleport_setting/roles/teleport/tasks/teleport_remove.yml create mode 100644 ansible/teleport_setting/roles/teleport/tasks/teleport_update.yml create mode 100644 ansible/teleport_setting/roles/teleport/templates/install-node.sh.j2 create mode 100644 ansible/teleport_setting/roles/teleport/templates/teleport.yaml.j2 create mode 100644 ansible/teleport_setting/roles/teleport/vars/main.yml create mode 100755 ansible/teleport_setting/teleport create mode 100644 ansible/teleport_setting/teleport.yml create mode 100644 ansible/teleport_setting/teleport_aws.yml create mode 100755 ansible/zabbix_agent/ansible.cfg create mode 100644 ansible/zabbix_agent/inventory create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/defaults/main.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/files/sample.conf create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/files/win_sample/doSomething.ps1 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/handlers/main.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/meta/main.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/INSTALL.rst create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/molecule.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/playbook.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/prepare.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/requirements.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_agent.py create mode 100644 
ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_default.py create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/Debian.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/Docker.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/Linux.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/RedHat.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows_conf.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/XCP-ng.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/api.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/macOS.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/main.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/remove.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/selinux.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_common.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/tasks/userparameter.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 create mode 100644 
ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/mysql.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/win_sample.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombie.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombielist.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/vars/Debian.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/vars/RedHat.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/vars/Windows.yml create mode 100644 ansible/zabbix_agent/roles/zabbix-agent/vars/main.yml create mode 100644 ansible/zabbix_agent/zabbix-agent.yaml diff --git a/ansible/00_old/agent_cluster_install.yaml b/ansible/00_old/agent_cluster_install.yaml new file mode 100644 index 0000000..f645fbb --- /dev/null +++ b/ansible/00_old/agent_cluster_install.yaml @@ -0,0 +1,9 @@ +--- +- hosts: cluster + become: true + gather_facts: true + environment: + KUBECONFIG: /root/.kube/ansible_config + roles: + - role: agent_os_setting + diff --git a/ansible/00_old/agent_datasaker.yml b/ansible/00_old/agent_datasaker.yml new file mode 100644 index 0000000..505048a --- /dev/null +++ b/ansible/00_old/agent_datasaker.yml @@ -0,0 +1,23 @@ +--- +- hosts: agent + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: 
"XQOt9G3oAtsOQyd3U25JwOu3/sE+zj/m3kRKL/d0OUAQn30jVlrBKN/gJp9cJ4C9CHU1D1vSEPRxaCk8NuwZh6+v48TiaingDy6F74YGySRvnH0gqdmfxLSGisD/g8/JqBlIwhhyMSVCVfAkcNysLnwLi4xLnZMlvVi2Lzo3MiekSfJS5adR3hAv6pCaCFe2rNW24pYS5PBYkP/kxp/cfYAN/UhVEs5J+h4/iQ5vozQgvWuskBpOjlUeEYZnMZ6Al91gAUmSRoxtzLc+QOdfp7+uDXpwXPm80bQz9bR20Elcr4+rNqLcc2ONwJwrSveDSvJn4xjB6n95hEYbaDHUpA==" + datasaker_agents: ["dsk-node-agent","dsk-log-agent"] + + datagate_trace_url: 10.10.43.111 + datagate_manifest_url: 10.10.43.111 + datagate_metric_url: 10.10.43.111 + datagate_plan_url: 10.10.43.111 + datagate_loggate_url: 10.10.43.111 + datasaker_api_url: 10.10.43.111:31501 + logs: + - collect: + type: file + file: + paths: ["/var/log/*.log","/datasaker/log/*.log"] + + #uninstall: True + #datasaker_clean: True diff --git a/ansible/00_old/api_cluster_install.yaml b/ansible/00_old/api_cluster_install.yaml new file mode 100644 index 0000000..f0a0a1b --- /dev/null +++ b/ansible/00_old/api_cluster_install.yaml @@ -0,0 +1,9 @@ +--- +- hosts: cluster + become: true + gather_facts: true + environment: + KUBECONFIG: /root/.kube/ansible_config + roles: + - role: api_os_setting + diff --git a/ansible/00_old/authorized_keys.yml b/ansible/00_old/authorized_keys.yml new file mode 100644 index 0000000..d01e291 --- /dev/null +++ b/ansible/00_old/authorized_keys.yml @@ -0,0 +1,11 @@ +--- +- hosts: cluster + remote_user: root + tasks: + - name: key add + authorized_key: + user: root + state: present + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" + manage_dir: False + diff --git a/ansible/00_old/bastion.yml b/ansible/00_old/bastion.yml new file mode 100644 index 0000000..05d46d5 --- /dev/null +++ b/ansible/00_old/bastion.yml @@ -0,0 +1,95 @@ +--- +- hosts: bastion + become: true + gather_facts: true + roles: + - role: bastion + vars: + - sshmainport: 2222 + admin_users: + - name: "minchulahn" + ip: "10.20.142.22" + description: "안민철" + key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDKDxtkcfx2ITlT2Yh7ZCT79do/25YQ2vROz38m8veAuBhOw+75oZJ4nN//zOWaaMvpC3Z7NIzOR+3UeukhnLZ591q8AaHcKjV8JEJMo2pvpH1vdLcTL9baLqWrxzgRimnZUNf5n5HNr+AKoXuPp//aVSJSoeznb66r04/rJSetT0QGDC8Kj5Q+MNvdd0/3U/nu7JxW9LIEaLoeiX6mVb4PpV7kl3rI3Vut/GnWakOhbS4yNvIFdR6d8rv305/BXJOz/aWy+0j7qK+NBzbSsI/l0vVUHfeD3whYGePCpWmj73ZsMTMjIjrC8DpRQlOJlAZ0GVpQnd/ayIWi4+V8VjvFcd6vSqrhhsNoOyo0Y/6cyO6iyvKqohMK6+HF1w6aXoaGCFFSl/3gw63saNAsdZPArnwf5yZ6GfPa/9bRn2k9g5xfp97Itpo6Iqq+PuRcZOes0EiIQe2hOoYQEIHIRhf8CZ+Xf6W1+XZB+WxEzUe4GCCwgUdTB6RIr4ThDxwCBV0=" + + - name: "havelight" + ip: "10.20.142.21" + description: "정재희" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDUAppqxDLltrMsYMwIxGi0FA5STA/R+H6oy7myfiJP2Lt4woCogMi3ELVKEhFkeJx4i8y9G80lynEYCHRH1kAQ/7YaJEVFrPXTvBw+OVxYdVS/gLl0rL89ky+n0dv6A9mancrvUOMacI5aN7/W+EhoLohRjRbWlsPGNnvAmO0AZnt595aMUjFkdhusGyBVunDUFSitj9TFkjxDhr6cx8Bi0FLpvdsoAvfqiw/MVKW2pMgj56AT5UCT0wvtSHSNY/C731jP/RKrxP0fnVhIkVys/XmLV/6SVEqL1XwqMTvRfi5+Q8cPsXrnPuUFHiNN4e/MGJkYi0lg7XbX8jDXv3ybdxZ7lGiUDebxjTKBCCghFae3eAwpJADEDfrzb8DHJZFwJVVdKGXvStTWTibcs14ilRPcB4SWIBx/cFCzwOBK/iw8CfEfsbVe6WQbDc4T4LrgL8cUzHPOO8CQcC4DV/O3BuoqQExu6xTmU8rhLT9kgatIdX0K5jgGbuqz7c2lelU=" + + - name: "sa_8001" + ip: "10.20.142.50" + description: "변정훈" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCgvFtLP7A1bR2ANxHiyTalgaI2pvxnCAzsqTAAh/+egIOi2vUIC2jRWGQXyoiTlupdNWQli2D93tEJBvG3VO5LOVocOHsFnFcV8RsiR4QGhqMeXRfMBWbf7Prby0qWv/VQ00gNWEgEjZUhOfBQeJsozGTd3dS4AgRnQkQmnvCT6TWD7+GwMg1SDlu/23y5aKLmpLkVT9kEG3yxZ3rWQfepjAubt+/saZPtyhkmc9+qhe2K+6PCZU2MCh6TYoKrcRUhVaJLvWqS35/Cv/9oxLg7lZwsasHFO9ANXWV9gBelCXLpYosN5hylUvl4JmSN+/qiOH3hpEbOtTCY/ZU0o1/xXLr0pmbYpZoT6zMKZ5fkweW7xidrg/bI1s/4+DVf4c/NJehw4PL3sqRmVdJsriFUifywh05Up5j1NQANiFlFngwEWy81cWRyvSL5q/plJHSvpd6g+WbsyC/QqYNAhxjnEosOb52QGZmLL7GqaC1hdKDOlJYZK63EBQ8YpHqGHo0=" + + allow_users: + - name: "wkd1994" + ip: "10.20.142.28" + description: "김동우" + key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDtmuAhVVyVJ87+t2xVtiS3bvTxxn0dmD7t4D2iSvSErIjRsXRCPLdc/yGWiezj+oVZtRPLJ2mjKToGUerdkcW8oqiQeL0+x/CjdlS2rQXvQa2HXCjB+MejwZyJ2bl7VDtIMdLianJBn7+XVc48+bIf7yait8yVH1aVWcS/AXOSo9LwX/uNW5VCL5BeXSGwXdwkuhjeJurR4WIVSBXuh1ql5Vy6BdSxcmLMihNlIL/DyuzfPLuQZbuSeaJ7eJKiHu63/SwBA1cPzj9tgI7zNvguapIHKXvoK8n5gNUXVRDGnD4J6xbzUQB3DbU8kaz7pDClxzgpkf3MnvP9QvnTyqV+aftYlb02as0PrwIxlTlW/sBxyEGdFe+JwoTctHkrSfp0lYRpyCv3eXJcdDu2l3dTJXAHlpcJuQRH2j9herURxML0w6re1iKJ8MAjOqUvh+B3A1U3x116zEGdsCNCRcfwehEir7fmGKaPvrmOiDOTlNswdL/OJ1RHKFuEZJPlUr8=" + + - name: "djkim" + ip: "10.20.142.36" + description: "김득진" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9Go9pLADJUQtq+ptTAcSIpi+VYv5/Kik0lBuV8xEc++vNtix5kwi+XSsNShHM3MVeiE8J27rYfyNn79r5pVKMuasMRyP3mTDZtRKr7/piM8MXuGSu1jCsVrTBZX0Sf4wuOA1tSkG9QgjBMZfvE9jOSYozA1K85mVE28m2rTihPnL5zYsDKnx+xIcwUBTpkOCoHiAfAX9b5ADAfScJigSZDjFLvexJ1aapPV2Iajh8huIhWvCUhrqUv/ldUm+b1iiOT7GXdrM/cam3FnLZ0b5KI9CQb7084+4l0BlmtPkuFcIlTDm1K6YO7+Mewd+F9uQZwvxGuElBPg8NVgFLD7+nrf2VlJYYCAeChyDV5+ZD70pSTcvHpJbmLKMtRFGov73ZPJ3vld9XCGUCajaoZz5Kz+ANmSC9nl3FpxnYgvFWfS7iwyC+VkGRKUg96/crXz4D8fW/wIskt+3cVrW9Z66psH41ll979mC8xly0ITWwbQZv7rvbdWSDVKVRgbXQOSc=" + + - name: "sanghee1357" + ip: "10.20.142.40" + description: "김상희" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC45maYW137cwvdS8AE9UzNHq9AMYrkEJtoNDAOVkUXtpVQITqvBCc4B4FfR5JK2h+imnBDng5fu728YAB7q31BE3Wub8I+QWhnQgv+kH1yMWj2s329tkHvcyNWIHSBqw4z1N74Zba+7mojKioju27HdcRcN1L7tpXSCHrq5bU6++CMShpZ7a3wo20RfikFWd563Y15mE3uDqlbkcuzE0KGSNrdY6Gy9aiE3/poVQRLaCmXnUKNw9wM3UGN9DanJi6iosXrlZRkpwhV+tHh2x+BWCbyY8jj94RDJgMwoKw71tzlEp+B1k6a7g+lEo3KFP//3PQxc9fdKBdg1YzSAKGKjsqATEVclmQHVskk6wZQC/wcjFxrSOreSp6knswX9AhIvGhMtoVo9iMy9cm+F4AauzjjfszCMO484983hIYwsh321VB14Wg7NroCYMUh7krATeKmNWhK0YicYCXINVMphBAcXFhuJduPejz19ZN356t+F/LDqlCxW7kO9QfYUy0=" + + - name: "jinbekim" + ip: "10.10.142.48" + description: "김진범" + + - name: "bypark" + ip: "10.20.142.26" + description: "박병욱" + key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCZig/9xMWR3QhwHPbkvY2b9nmHiWkJHgnztGfIyxVTmkcsr9QViIvNUINlRBlE2+I5j7R2+0qI5GkAYndJsQoZiZ3iPqxnM5KdB9bEbWS5Tv7pbGyHyzaYPMUS3g6ZRMKnbJlAmhOLuq4TNYaUSESvaiYbCbaZK2JdsfPtSC99Gez6+HNoapILeg6xkxLnMsgUG6QzGaZyRABlPRbctGfx2U7cYe/7b7T+/yNtMU2FKrAJqcy0S1IUzc/dK2m5SQ3Y2GMohuGkv8mfs16i0wi3LfgEIatsmj2KB7Y7lIYW/GEZA2I+K2uH9Pu+F/kmGvAu5jNd1ztSo9MgElyu2NMXYhM3f/eDD+PdHKjUvOtE5twNBHQooPjBpp/mja4hnxLKepTqgP1t6azncPB8m6jC6MTbkhOHpgSNXurhx0kCurLA+l9KaySidhc0mFNJZGRKAhQoMIDFgXlzkZ4GmmtbfOJ/J1k7QqHZya5x6M4mOfvlPECFKVF24vzJVEulY3E=" + + - name: "joonsoopark" + ip: "10.20.142.33" + description: "박준수" + key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICeOzKeL4ZUXw0lEHDZoBsp7M3oobrBI0sWBHdpk0X0T" + + - name: "baekchan1024" + ip: "10.20.142.39" + description: "백승찬" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDaqqy9YVwxh37xOU0nytBPd6GEJe30e1L/K5UXDZToteNebyfQrtFogxB6MpMNaAzAk6qbyPuZA3rgP8Y+qXgRlx88cxje5P5yOCsMW2o3xD5PiJ7lluWQ9tlS5ti4B9EWurJOsGF27XKKuSHN+dx9ZIb4sDqLYzmycPNwFaEtH6GQ2vjqpPMfjmKAuYmKD4L7mdA8lXTiRS2uYDkUxwQ+6PU+axTauD9qsXuGDAnGkVHKNE0o9OCf1uoyOhy6EB2sDz5Pymr7fbRJauWNxuSJdYPKY33GdDKpioP/1nRLSLtr1nvLHVrG/5CSNO1x20WYXFEGoMTzW4T5nYSS61apHkQ/0Csv0LBeHPc9gsMPobNJpIYlvGwdODQ+fpgxyB4SAQJKtQR1YB4w5OVtXVZAMvZZKI9gQQHZ8wQ4Zk0erGxKeyLxnDrKKNHLRPyUrjkL7H2a0i8BGpdk8sxW9NVrJJGgmQQiPbJx0yvIi1n55mUq+ZVjiF5qPvxtc5D133k=" + + - name: "jungry" + ip: "10.20.142.44" + description: "서정우" + + - name: "ose" + ip: "10.20.142.34" + description: "오승은" + key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDAlYSGJZpOOEPZqIa1/CXxiaUNj1wsgkp0kEyD2SX8r7ovwmSAWCS24v/IOSgsUTFRpL64vIeCtcZ8sj4Hwzd3F2h+carQP0v+leCkzPpQ7aP/BoPS27+fSCzaOZv/QJ+eIcXWHIbWkXf6MYQ35PykDeJIO61OMOlWhpNV425VSwfZoB72xZmEH+rIZjXHHs8vYtIG2sXZE22BLiVw6PEL/C4QB2khBT5ZAjX2xGEzUoSknzva/8Uu20adQBalFTIdyLV7V6CxkIPkSgfmZh/fqXfbfPsxHLPK2o2ueGbx3fcN3kAqFrqpJgjEIZmNj6qhVPtbN5TSUyIjtoPhC4JR0heqckz1qLah+8lSiUfHSblGW89QuUcedHdwHp/RiZW6HQO0cqS/QPNcgPLTiv68voBapS9rav+j0tt1RynNY+AdhCOoo4BbGW0pXqi0vaHzbbfbzxp78kx/7/KXmUHkzGSkmlXVbKqzDm5k/kRn0q4pimDun42b+MjNYu3gZz0=" + + - name: "gurwns1540" + ip: "10.20.142.35" + description: "윤혁준" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1+kC8LzDxwc4gfiGzUQH+CeKGf+elX3oKciMmLQJmpddlcWuRthq1pszufHjypT/FfC/XVLZkGvjMDJUWro/Pen3RcdTcENteVZO/nzQ89nmS/D3tbg6nVWxiast6bDdSEdPF8CKSUAlA+8hTgSCWou7TtOuWGCKj+6HSHctBA41WFLpYInYHWTnC+LY1nwOurjG4qjmgdEzBXMhLWvuZDVE21oIUMEXbjW1dXhHNMKfyn/mUqSSG9zoXZSK0KB8OHhBsbxzFqu5cXC1TTpJOyX05730LUdwF9MevreUS3ws5NY8h0C6EVAOMQqeH5gkwVTHsyXQHtXB9nGI1g7sMIjEzJHkOygK17nAfapWhGFahhaaq42qdo7N3Pj8IjrY3S9EDXnPtQODROj3JVzo3Sgd2FUKDcAIWwJHMAwkaFqciPGIrj4ib81NbOoWn7oCjbIyDxgoxSp1vpW7C25rL22LtrCHyMWPbhV19FJIZqtg7f94JptzLND1pHDnsnfeNAxz9d6oKdcJW5bXUDeDCQxBio1RBF6nNzSRoiD0+FD29of9wNWRd2cBkR8uJV7P9XfXMzMK5q7Wqte/DABs3wJ3v/cth6kPrRV7j2h+4DGbEj5Mpz8XAFnGkZFmd/UiSbNqRBLKmp0lPpyxZrRU00xuqJ51pYB2wMwkQgOIVuw==" + + - name: "yyeun" + ip: "10.20.142.45" + description: "이예은" + + - name: "sujung" + ip: "10.20.142.27" + description: "정성락" + key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKbI5DjRkABz65NnREzf5HKKIMPrIA4DrnDDXTrjnRH8" + + - name: "antcho" + ip: "10.20.142.46" + description: "조혜수" + + - name: "stdhsw" + ip: "10.20.142.32" + description: "한승우" + key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIANTMTgqbTtIKKRsZU9An9D3La9Fh1bUtiLE/Y0nL4CZ" + + - name: "seungjinjeong" + ip: "10.20.142.41" + description: "정승진" + key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDi8funYVM0eRmfplW5EdnfJOFEVDEMMw08VRn6FD9x9VuNWCEkY3iErzekBY2SRat8g6q0VXRyu7b/bhm/kD+BtI79fmz9FKxslTZCeKKN1KWfoZoXSRnvjOX1Y6NDnY2X5M+3kN40ek9ku6abN6lOtInTXJ1QOJIISa8l6vrB/j1xVVZghTYY5MBMc89cRZESGdBZWld0CtmoM+mnjh5vWCCA3VJTcDbj5LKtWllA6t58KwtGBikr8iaOpi83dQ91eXWzxTttl/LCe9bfgSxYlmvZILn0UZMu1WiWBhlIBzC6RlxorkDVRXcSRjguEt+/ys2rv6UTSkm150O4PgjgxlZPmTJt1m5y/St57LELUVbV6XGSq6+eZNTZOYBxxRkKcV0uByCBjxjsVlMmoEZoxedhSVT1Z8/AiMnjPBjXx2ease04EvtZs6rpDRd0puzcx1TKoCkyak60ymxc91X9lQg3kUl0av/G5kMKJQqW6v31GA1Vnh4K9haCVF/Ki/M=" diff --git a/ansible/00_old/cmoa_install.yaml b/ansible/00_old/cmoa_install.yaml new file mode 100644 index 0000000..9837135 --- /dev/null +++ b/ansible/00_old/cmoa_install.yaml @@ -0,0 +1,11 @@ +--- +- hosts: cluster + become: true + gather_facts: true + environment: + KUBECONFIG: /root/.kube/ansible_config + roles: + - role: cmoa_os_setting + - role: cmoa_install + delegate_to: 127.0.0.1 + diff --git a/ansible/00_old/datasaker.yml b/ansible/00_old/datasaker.yml new file mode 100644 index 0000000..9bc3d45 --- /dev/null +++ b/ansible/00_old/datasaker.yml @@ -0,0 +1,33 @@ +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + #- role: agent-ansible + vars: + datasaker_api_key: "1VL7/mhddWkQaS/vf/VjjwjnwaUhtZnLL++ih9LxYSB7HVkPpZw1Duy/4gxLN/73Vga00bD79mVd6N4dP0BVxmGqLnR6xItnSLlO3M6LmOMuM8bLaBuxxOvxST3lxpvtI0B2ilyjqTLh5y+NJWFV7Awq4zpqnPnTZ5dryp3yc4zc3C7Vxu0f2CL7/oGT0LRj/1l7gchuUxw2TVDLFFRylb+cFt6/NNylBxIb1wKGILd7N6NGgnsdRcrv4ZvTEPusrDqxO3IRYF6z9ZNbkQ1BPeDINtVFTgwhqFZjxg6erd8oqscB9n1DHOi6+tJ8VSHi2w5hYxHq93EV4cxBfzXAug==" + datasaker_docker_agents: + - "dsk-docker-node-agent" + - "dsk-docker-trace-agent" + - "dsk-docker-log-agent" + - "dsk-docker-postgres-agent" + postgres_user_name: sample + postgres_user_password: 1q2w3e4r + postgres_database_address: 0.0.0.0 + postgres_database_port: 5432 + plan_postgres_user_name: sample + plan_postgres_user_password: 1q2w3e4r + plan_postgres_database_address: 0.0.0.0 + 
plan_postgres_database_name: sample + plan_postgres_database_port: 5432 + logs: + - collect: + type: file + file: + paths: + - /var/log/*.log + - /var/lib/docker/containers/*/*.log + custom_log_volume: + - /var/log/ + - /var/lib/docker/containers + #uninstall: True + #datasaker_clean: True diff --git a/ansible/00_old/dev_datasaker.yml b/ansible/00_old/dev_datasaker.yml new file mode 100644 index 0000000..e121186 --- /dev/null +++ b/ansible/00_old/dev_datasaker.yml @@ -0,0 +1,50 @@ +--- +- hosts: agent + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "XQOt9G3oAtsOQyd3U25JwOu3/sE+zj/m3kRKL/d0OUAQn30jVlrBKN/gJp9cJ4C9CHU1D1vSEPRxaCk8NuwZh6+v48TiaingDy6F74YGySRvnH0gqdmfxLSGisD/g8/JqBlIwhhyMSVCVfAkcNysLnwLi4xLnZMlvVi2Lzo3MiekSfJS5adR3hAv6pCaCFe2rNW24pYS5PBYkP/kxp/cfYAN/UhVEs5J+h4/iQ5vozQgvWuskBpOjlUeEYZnMZ6Al91gAUmSRoxtzLc+QOdfp7+uDXpwXPm80bQz9bR20Elcr4+rNqLcc2ONwJwrSveDSvJn4xjB6n95hEYbaDHUpA==" + datasaker_agents: ["dsk-node-agent","dsk-log-agent"] + #datasaker_api_key: "eO58wEYK/2HThAV+5jgv7Or/qW3zJknBQF0FJt5Xo4kSZ9YH2/CJgfNUwKbGwlbzmihG9dVsSmmS40szOuvRVZJO0vPga98sJNI32AJdWaYX8oCNFouI0lYG+r9Y4vahrS7+FVwntyfkjETotqBDvoQ5HjGjvW0wviPagW/alNbI5pvpWwBHtgz9D83Y8DSvCvO64G4xhyIYZPSML11EqWUO8prYT8LfdD4n2oBp0QJ3cXKdvJAUc4w5LKbTASb8x8UTpVU3JH3Wnwe79PKftJ8YdxOtb5jjzXeOEEM2GD8xz4pbB7scCx5oJCWQLF1js6a2uFLENBgW+ztHRf1j2Q==" + #datasaker_api_key: "1VL7/mhddWkQaS/vf/VjjwjnwaUhtZnLL++ih9LxYSB7HVkPpZw1Duy/4gxLN/73Vga00bD79mVd6N4dP0BVxmGqLnR6xItnSLlO3M6LmOMuM8bLaBuxxOvxST3lxpvtI0B2ilyjqTLh5y+NJWFV7Awq4zpqnPnTZ5dryp3yc4zc3C7Vxu0f2CL7/oGT0LRj/1l7gchuUxw2TVDLFFRylb+cFt6/NNylBxIb1wKGILd7N6NGgnsdRcrv4ZvTEPusrDqxO3IRYF6z9ZNbkQ1BPeDINtVFTgwhqFZjxg6erd8oqscB9n1DHOi6+tJ8VSHi2w5hYxHq93EV4cxBfzXAug==" + datasaker_agents: ["dsk-node-agent","dsk-log-agent"] + #datasaker_docker_agents: ["dsk-docker-log-agent"] + #postgres_user_name: jhjung + #postgres_user_password: 1q2w3e4r + #postgres_database_address: 0.0.0.0 + #postgres_database_port: 5432 + #plan_postgres_user_name: 
jhjung + #plan_postgres_user_password: 1q2w3e4r + #plan_postgres_database_address: 0.0.0.0 + #plan_postgres_database_port: 5432 + #plan_postgres_database_name: test + datagate_trace_url: 10.10.43.111 + datagate_trace_port: 31300 + datagate_trace_timeout: 5s + + datagate_manifest_url: 10.10.43.111 + datagate_manifest_port: 31301 + datagate_manifest_timeout: 5s + + datagate_metric_url: 10.10.43.111 + datagate_metric_port: 31302 + datagate_metric_timeout: 5s + + datagate_plan_url: 10.10.43.111 + datagate_plan_port: 31303 + datagate_plan_timeout: 5s + + datagate_loggate_url: 10.10.43.111 + datagate_loggate_port: 31304 + datagate_loggate_timeout: 5s + + datasaker_api_url: 10.10.43.111:31501 + datasaker_api_send_interval: 1m + #uninstall: True + #datasaker_clean: True + logs: + - collect: + type: file + file: + paths: ["/var/log/*.log","/datasaker/log/*.log","/var/log/secure"] diff --git a/ansible/00_old/get-docker.sh b/ansible/00_old/get-docker.sh new file mode 100644 index 0000000..e8586ff --- /dev/null +++ b/ansible/00_old/get-docker.sh @@ -0,0 +1,645 @@ +#!/bin/sh +set -e +# Docker CE for Linux installation script +# +# See https://docs.docker.com/engine/install/ for the installation steps. +# +# This script is meant for quick & easy install via: +# $ curl -fsSL https://get.docker.com -o get-docker.sh +# $ sh get-docker.sh +# +# For test builds (ie. release candidates): +# $ curl -fsSL https://test.docker.com -o test-docker.sh +# $ sh test-docker.sh +# +# NOTE: Make sure to verify the contents of the script +# you downloaded matches the contents of install.sh +# located at https://github.com/docker/docker-install +# before executing. 
+# +# Git commit from https://github.com/docker/docker-install when +# the script was uploaded (Should only be modified by upload job): +SCRIPT_COMMIT_SHA="66474034547a96caa0a25be56051ff8b726a1b28" + +# strip "v" prefix if present +VERSION="${VERSION#v}" + +# The channel to install from: +# * nightly +# * test +# * stable +# * edge (deprecated) +DEFAULT_CHANNEL_VALUE="stable" +if [ -z "$CHANNEL" ]; then + CHANNEL=$DEFAULT_CHANNEL_VALUE +fi + +DEFAULT_DOWNLOAD_URL="https://download.docker.com" +if [ -z "$DOWNLOAD_URL" ]; then + DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL +fi + +DEFAULT_REPO_FILE="docker-ce.repo" +if [ -z "$REPO_FILE" ]; then + REPO_FILE="$DEFAULT_REPO_FILE" +fi + +mirror='' +DRY_RUN=${DRY_RUN:-} +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + --dry-run) + DRY_RUN=1 + ;; + --*) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + Aliyun) + DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce" + ;; + AzureChinaCloud) + DOWNLOAD_URL="https://mirror.azure.cn/docker-ce" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +# version_gte checks if the version specified in $VERSION is at least +# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either +# unset (=latest) or newer or equal than the specified version. Returns 1 (fail) +# otherwise. +# +# examples: +# +# VERSION=20.10 +# version_gte 20.10 // 0 (success) +# version_gte 19.03 // 0 (success) +# version_gte 21.10 // 1 (fail) +version_gte() { + if [ -z "$VERSION" ]; then + return 0 + fi + eval calver_compare "$VERSION" "$1" +} + +# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success) +# if version A is newer or equal than version B, or 1 (fail) otherwise. 
Patch +# releases and pre-release (-alpha/-beta) are not taken into account +# +# examples: +# +# calver_compare 20.10 19.03 // 0 (success) +# calver_compare 20.10 20.10 // 0 (success) +# calver_compare 19.03 20.10 // 1 (fail) +calver_compare() ( + set +x + + yy_a="$(echo "$1" | cut -d'.' -f1)" + yy_b="$(echo "$2" | cut -d'.' -f1)" + if [ "$yy_a" -lt "$yy_b" ]; then + return 1 + fi + if [ "$yy_a" -gt "$yy_b" ]; then + return 0 + fi + mm_a="$(echo "$1" | cut -d'.' -f2)" + mm_b="$(echo "$2" | cut -d'.' -f2)" + if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then + return 1 + fi + + return 0 +) + +is_dry_run() { + if [ -z "$DRY_RUN" ]; then + return 1 + else + return 0 + fi +} + +is_wsl() { + case "$(uname -r)" in + *microsoft* ) true ;; # WSL 2 + *Microsoft* ) true ;; # WSL 1 + * ) false;; + esac +} + +is_darwin() { + case "$(uname -s)" in + *darwin* ) true ;; + *Darwin* ) true ;; + * ) false;; + esac +} + +deprecation_notice() { + distro=$1 + distro_version=$2 + echo + printf "\033[91;1mDEPRECATION WARNING\033[0m\n" + printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version" + echo " No updates or security fixes will be released for this distribution, and users are recommended" + echo " to upgrade to a currently maintained version of $distro." + echo + printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue." + echo + sleep 10 +} + +get_distribution() { + lsb_dist="" + # Every system that we officially support has /etc/os-release + if [ -r /etc/os-release ]; then + lsb_dist="$(. 
/etc/os-release && echo "$ID")" + fi + # Returning an empty string here should be alright since the + # case statements don't act unless you provide an actual value + echo "$lsb_dist" +} + +echo_docker_as_nonroot() { + if is_dry_run; then + return + fi + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + echo + echo "================================================================================" + echo + if version_gte "20.10"; then + echo "To run Docker as a non-privileged user, consider setting up the" + echo "Docker daemon in rootless mode for your user:" + echo + echo " dockerd-rootless-setuptool.sh install" + echo + echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode." + echo + fi + echo + echo "To run the Docker daemon as a fully privileged service, but granting non-root" + echo "users access, refer to https://docs.docker.com/go/daemon-access/" + echo + echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent" + echo " to root access on the host. Refer to the 'Docker daemon attack surface'" + echo " documentation for details: https://docs.docker.com/go/attack-surface/" + echo + echo "================================================================================" + echo +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. 
+ EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + if [ "$lsb_dist" = "osmc" ]; then + # OSMC runs Raspbian + lsb_dist=raspbian + else + # We're Debian and don't even know it! + lsb_dist=debian + fi + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + fi + fi + fi +} + +do_install() { + echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA" + + if command_exists docker; then + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + again to update Docker, you can safely ignore this message. + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. 
+ EOF + exit 1 + fi + fi + + if is_dry_run; then + sh_c="echo" + fi + + # perform some very rudimentary platform detection + lsb_dist=$( get_distribution ) + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + if is_wsl; then + echo + echo "WSL DETECTED: We recommend using Docker Desktop for Windows." + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + ;; + + centos|rhel|sles) + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --release | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + esac + + # Check if this is a forked Linux distro + check_forked + + # Print deprecation warnings for distro versions that recently reached EOL, + # but may still be commonly used (especially LTS versions). 
+ case "$lsb_dist.$dist_version" in + debian.stretch|debian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + raspbian.stretch|raspbian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + ubuntu.xenial|ubuntu.trusty) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + fedora.*) + if [ "$dist_version" -lt 33 ]; then + deprecation_notice "$lsb_dist" "$dist_version" + fi + ;; + esac + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + pre_reqs="apt-transport-https ca-certificates curl" + if ! command -v gpg > /dev/null; then + pre_reqs="$pre_reqs gnupg" + fi + apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL" + ( + if ! is_dry_run; then + set -x + fi + $sh_c 'apt-get update -qq >/dev/null' + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null" + $sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings' + $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg" + $sh_c "chmod a+r /etc/apt/keyrings/docker.gpg" + $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list" + $sh_c 'apt-get update -qq >/dev/null' + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g")" + search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst apt-cache madison results" + echo + exit 1 + fi 
+ if version_gte "18.09"; then + search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + echo "INFO: $search_command" + cli_pkg_version="=$($sh_c "$search_command")" + fi + pkg_version="=$pkg_version" + fi + fi + ( + pkgs="docker-ce${pkg_version%=}" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io" + fi + if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then + # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently) + pkgs="$pkgs docker-scan-plugin" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pkgs >/dev/null" + ) + echo_docker_as_nonroot + exit 0 + ;; + centos|fedora|rhel) + if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then + echo "Packages for RHEL are currently only available for s390x." + exit 1 + fi + if [ "$lsb_dist" = "fedora" ]; then + pkg_manager="dnf" + config_manager="dnf config-manager" + enable_channel_flag="--set-enabled" + disable_channel_flag="--set-disabled" + pre_reqs="dnf-plugins-core" + pkg_suffix="fc$dist_version" + else + pkg_manager="yum" + config_manager="yum-config-manager" + enable_channel_flag="--enable" + disable_channel_flag="--disable" + pre_reqs="yum-utils" + pkg_suffix="el" + fi + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + ( + if ! 
is_dry_run; then + set -x + fi + $sh_c "$pkg_manager install -y -q $pre_reqs" + $sh_c "$config_manager --add-repo $repo_file_url" + + if [ "$CHANNEL" != "stable" ]; then + $sh_c "$config_manager $disable_channel_flag docker-ce-*" + $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL" + fi + $sh_c "$pkg_manager makecache" + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix" + search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst $pkg_manager list results" + echo + exit 1 + fi + if version_gte "18.09"; then + # older versions don't support a cli package + search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)" + fi + # Cut out the epoch and prefix with a '-' + pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + if [ -n "$cli_pkg_version" ]; then + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then + # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently) + pkgs="$pkgs docker-scan-plugin" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs 
docker-buildx-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "$pkg_manager install -y -q $pkgs" + ) + echo_docker_as_nonroot + exit 0 + ;; + sles) + if [ "$(uname -m)" != "s390x" ]; then + echo "Packages for SLES are currently only available for s390x" + exit 1 + fi + if [ "$dist_version" = "15.3" ]; then + sles_version="SLE_15_SP3" + else + sles_minor_version="${dist_version##*.}" + sles_version="15.$sles_minor_version" + fi + opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/$sles_version/security:SELinux.repo" + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + pre_reqs="ca-certificates curl libseccomp2 awk" + ( + if ! is_dry_run; then + set -x + fi + $sh_c "zypper install -y $pre_reqs" + $sh_c "zypper addrepo $repo_file_url" + if ! is_dry_run; then + cat >&2 <<-'EOF' + WARNING!! + openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now. + Do you wish to continue? + You may press Ctrl+C now to abort this script. 
+ EOF + ( set -x; sleep 30 ) + fi + $sh_c "zypper addrepo $opensuse_repo" + $sh_c "zypper --gpg-auto-import-keys refresh" + $sh_c "zypper lr -d" + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")" + search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst zypper list results" + echo + exit 1 + fi + search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'" + # It's okay for cli_pkg_version to be blank, since older versions don't support a cli package + cli_pkg_version="$($sh_c "$search_command")" + pkg_version="-$pkg_version" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + if [ -n "$cli_pkg_version" ]; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if ! 
is_dry_run; then + set -x + fi + $sh_c "zypper -q install -y $pkgs" + ) + echo_docker_as_nonroot + exit 0 + ;; + *) + if [ -z "$lsb_dist" ]; then + if is_darwin; then + echo + echo "ERROR: Unsupported operating system 'macOS'" + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + exit 1 + fi + fi + echo + echo "ERROR: Unsupported distribution '$lsb_dist'" + echo + exit 1 + ;; + esac + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/ansible/00_old/health_check.yml b/ansible/00_old/health_check.yml new file mode 100644 index 0000000..3c3ae7d --- /dev/null +++ b/ansible/00_old/health_check.yml @@ -0,0 +1,18 @@ +--- +- name: Check the health of all servers + hosts: all + tasks: + - name: Check if server is reachable + ping: + register: result + ignore_errors: true + + - name: Print result + debug: + var: result + when: result is defined + + - name: Print error message + debug: + msg: "Server {{ inventory_hostname }} could not be reached." 
+ when: result.ping is undefined diff --git a/ansible/00_old/install-centos-node.sh b/ansible/00_old/install-centos-node.sh new file mode 100644 index 0000000..f1a4003 --- /dev/null +++ b/ansible/00_old/install-centos-node.sh @@ -0,0 +1,995 @@ +#!/bin/bash +set -euo pipefail +SCRIPT_NAME="teleport-installer" + +# default values +ALIVE_CHECK_DELAY=3 +CONNECTIVITY_TEST_METHOD="" +COPY_COMMAND="cp" +DISTRO_TYPE="" +IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}" +LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons" +LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)" +MACOS_STDERR_LOG="/var/log/teleport-stderr.log" +MACOS_STDOUT_LOG="/var/log/teleport-stdout.log" +SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service" +TARGET_PORT_DEFAULT=443 +TELEPORT_ARCHIVE_PATH='teleport' +TELEPORT_BINARY_DIR="/usr/local/bin" +TELEPORT_BINARY_LIST="teleport tctl tsh" +TELEPORT_CONFIG_PATH="/etc/teleport.yaml" +TELEPORT_DATA_DIR="/var/lib/teleport" +TELEPORT_DOCS_URL="https://goteleport.com/docs/" +TELEPORT_FORMAT="" + +# initialise variables (because set -u disallows unbound variables) +f="" +l="" +DISABLE_TLS_VERIFICATION=false +NODENAME=$(hostname) +IGNORE_CHECKS=false +OVERRIDE_FORMAT="" +QUIET=false +APP_INSTALL_DECISION="" +INTERACTIVE=false + +# the default value of each variable is a templatable Go value so that it can +# optionally be replaced by the server before the script is served up +TELEPORT_VERSION='13.3.4' +TELEPORT_PACKAGE_NAME='teleport' +REPO_CHANNEL='' +TARGET_HOSTNAME='teleport.access.datasaker.io' +TARGET_PORT='443' +JOIN_TOKEN='d5bef76d6d1d2038dbf96a221e429d5e' +JOIN_METHOD='' +JOIN_METHOD_FLAG="" +[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}" + +# inject labels into the configuration +LABELS='teleport.internal/resource-id=9182aa69-a343-4111-a587-1efe5b1daa1c' +LABELS_FLAG=() +[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}") + +# When all stanza generators have been updated to use the new +# `teleport 
configure` commands CA_PIN_HASHES can be removed along +# with the script passing it in in `join_tokens.go`. +CA_PIN_HASHES='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8' +CA_PINS='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8' +ARG_CA_PIN_HASHES="" +APP_INSTALL_MODE='false' +APP_NAME='' +APP_URI='' +DB_INSTALL_MODE='false' + +# usage message +# shellcheck disable=SC2086 +usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; } +while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do + case "${o}" in + v) TELEPORT_VERSION=${OPTARG};; + h) TARGET_HOSTNAME=${OPTARG};; + p) TARGET_PORT=${OPTARG};; + j) JOIN_TOKEN=${OPTARG};; + c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";; + f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;; + q) QUIET=true;; + l) l=${OPTARG};; + i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";; + k) DISABLE_TLS_VERIFICATION=true;; + a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};; + u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};; + *) usage;; + esac +done +shift $((OPTIND-1)) + +if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then + CA_PIN_HASHES="${ARG_CA_PIN_HASHES}" +fi + +# function to construct a go template variable +# go's template parser is a bit finicky, so we dynamically build the value one character at a time +construct_go_template() { + OUTPUT="{" + OUTPUT+="{" + OUTPUT+="." + OUTPUT+="${1}" + OUTPUT+="}" + OUTPUT+="}" + echo "${OUTPUT}" +} + +# check whether we are root, exit if not +assert_running_as_root() { + if ! [ "$(id -u)" = 0 ]; then + echo "This script must be run as root." 
1>&2 + exit 1 + fi +} + +# function to check whether variables are either blank or set to the default go template value +# (because they haven't been set by the go script generator or a command line argument) +# returns 1 if the variable is set to a default/zero value +# returns 0 otherwise (i.e. it needs to be set interactively) +check_variable() { + VARIABLE_VALUE="${!1}" + GO_TEMPLATE_NAME=$(construct_go_template "${2}") + if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then + return 1 + fi + return 0 +} + +# function to check whether a provided value is "truthy" i.e. it looks like you're trying to say "yes" +is_truthy() { + declare -a TRUTHY_VALUES + TRUTHY_VALUES=("y" "Y" "yes" "YES" "ye" "YE" "yep" "YEP" "ya" "YA") + CHECK_VALUE="$1" + for ARRAY_VALUE in "${TRUTHY_VALUES[@]}"; do [[ "${CHECK_VALUE}" == "${ARRAY_VALUE}" ]] && return 0; done + return 1 +} + +# function to read input until the value you get is non-empty +read_nonblank_input() { + INPUT="" + VARIABLE_TO_ASSIGN="$1" + shift + PROMPT="$*" + until [[ "${INPUT}" != "" ]]; do + echo -n "${PROMPT}" + read -r INPUT + done + printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}" +} + +# error if we're not root +assert_running_as_root + +# set/read values interactively if not provided +# users will be prompted to enter their own value if all the following are true: +# - the current value is blank, or equal to the default Go template value +# - the value has not been provided by command line argument +! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): " +! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: " +! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; } +! 
check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: " +! check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): " +[ -n "${f}" ] && OVERRIDE_FORMAT=${f} +[ -n "${l}" ] && LOG_FILENAME=${l} +# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already), +# prompt the user to choose whether to enable app_service +if [[ "${INTERACTIVE}" == "true" ]]; then + if ! check_variable APP_INSTALL_MODE appInstallMode; then + APP_INSTALL_MODE="false" + echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] " + read -r APP_INSTALL_DECISION + if is_truthy "${APP_INSTALL_DECISION}"; then + APP_INSTALL_MODE="true" + fi + fi +fi +# prompt for extra needed values if we're running in app service mode +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + ! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): " + ! 
check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): " + # generate app public addr by concatenating values + APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}" +fi + +# set default target port if value not provided +if [[ "${TARGET_PORT}" == "" ]]; then + TARGET_PORT=${TARGET_PORT_DEFAULT} +fi + +# clear log file if provided +if [[ "${LOG_FILENAME}" != "" ]]; then + if [ -f "${LOG_FILENAME}" ]; then + echo -n "" > "${LOG_FILENAME}" + fi +fi + +# log functions +log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; } +log() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*" + if [[ ${QUIET} != "true" ]]; then + echo "${LOG_LINE}" + fi + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line with no timestamp or starting data, always prints +log_only() { + LOG_LINE="$*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line by itself as a header +log_header() { + LOG_LINE="$*" + echo "" + echo "${LOG_LINE}" + echo "" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# important log lines, print even when -q (quiet) is passed +log_important() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +log_cleanup_message() { + log_only "This script does not overwrite any existing settings or Teleport installations." 
+ log_only "Please clean up by running any of the following steps as necessary:" + log_only "- stop any running Teleport processes" + log_only " - pkill -f teleport" + log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself" + log_only " - rm -rf ${TELEPORT_DATA_DIR}" + log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}" + log_only " - rm -f ${TELEPORT_CONFIG_PATH}" + log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}" + for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done + log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}" + log_only "Run this installer again when done." + log_only +} + +# other functions +# check whether a named program exists +check_exists() { NAME=$1; if type "${NAME}" >/dev/null 2>&1; then return 0; else return 1; fi; } +# checks for the existence of a list of named binaries and exits with error if any of them don't exist +check_exists_fatal() { + for TOOL in "$@"; do + if ! 
check_exists "${TOOL}"; then + log_important "Error: cannot find ${TOOL} - it needs to be installed" + exit 1 + fi + done +} +# check connectivity to the given host/port and make a request to see if Teleport is listening +# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return +# values aren't really a thing that exists in bash +check_connectivity() { + HOST=$1 + PORT=$2 + # check with nc + if check_exists nc; then + CONNECTIVITY_TEST_METHOD="nc" + if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc, check with telnet + elif check_exists telnet; then + CONNECTIVITY_TEST_METHOD="telnet" + if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc or telnet, try and use /dev/tcp + elif [ -f /dev/tcp ]; then + CONNECTIVITY_TEST_METHOD="/dev/tcp" + if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi + else + return 255 + fi +} +# check whether a teleport DEB is already installed and exit with error if so +check_deb_not_already_installed() { + check_exists_fatal dpkg awk + DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true) + if [[ ${DEB_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})." + log_important "You will need to remove that package before using this script." + exit 1 + fi +} +# check whether a teleport RPM is already installed and exit with error if so +check_rpm_not_already_installed() { + check_exists_fatal rpm + RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true) + if [[ ${RPM_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})." + log_important "You will need to remove that package before using this script." 
+ exit 1 + fi +} +# function to check if given variable is set +check_set() { + CHECK_KEY=${1} || true + CHECK_VALUE=${!1} || true + if [[ "${CHECK_VALUE}" == "" ]]; then + log "Required variable ${CHECK_KEY} is not set" + exit 1 + else + log "${CHECK_KEY}: ${CHECK_VALUE}" + fi +} +# checks that teleport binary can be found in path and runs 'teleport version' +check_teleport_binary() { + FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version) + if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then + log "Cannot find Teleport binary" + return 1 + else + log "Found: ${FOUND_TELEPORT_VERSION}"; + return 0 + fi +} +# wrapper to download with curl +download() { + URL=$1 + OUTPUT_PATH=$2 + CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5" + # optionally allow disabling of TLS verification (can be useful on older distros + # which often have an out-of-date set of CA certificate bundle which won't validate) + if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then + CURL_COMMAND+=" -k" + fi + log "Running ${CURL_COMMAND} ${URL}" + log "Downloading to ${OUTPUT_PATH}" + # handle errors with curl + if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then + log_important "curl error downloading ${URL}" + log "On an older OS, this may be related to the CA certificate bundle being too old." + log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!" + exit 1 + fi + # check that the file has a non-zero size as an extra validation + check_exists_fatal wc xargs + FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)" + if [ "${FILE_SIZE}" -eq 0 ]; then + log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue." 
+ exit 1 + else + log "Downloaded file size: ${FILE_SIZE} bytes" + fi + # if we have a hashing utility installed, also download and validate the checksum + SHA_COMMAND="" + # shasum is installed by default on MacOS and some distros + if check_exists shasum; then + SHA_COMMAND="shasum -a 256" + # sha256sum is installed by default in some other distros + elif check_exists sha256sum; then + SHA_COMMAND="sha256sum" + fi + if [[ "${SHA_COMMAND}" != "" ]]; then + log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file" + SHA_URL="${URL}.sha256" + SHA_PATH="${OUTPUT_PATH}.sha256" + ${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}" + if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then + log "The downloaded file's checksum validated correctly" + else + SHA_EXPECTED=$(cat "${SHA_PATH}") + SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}") + if check_exists awk; then + SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}') + SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}') + fi + log_important "Checksum of the downloaded file did not validate correctly" + log_important "Expected: ${SHA_EXPECTED}" + log_important "Got: ${SHA_ACTUAL}" + log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support." 
+ exit 1 + fi + else + log "shasum/sha256sum utilities not found, will skip checksum validation" + fi +} +# gets the filename from a full path (https://target.site/path/to/file.tar.gz -> file.tar.gz) +get_download_filename() { echo "${1##*/}"; } +# gets the pid of any running teleport process (and converts newlines to spaces) +get_teleport_pid() { + check_exists_fatal pgrep xargs + pgrep teleport | xargs echo +} +# returns a command which will start teleport using the config +get_teleport_start_command() { + echo "${TELEPORT_BINARY_DIR}/teleport start --config=${TELEPORT_CONFIG_PATH}" +} +# installs the teleport-provided launchd config +install_launchd_config() { + log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist +} +# installs the teleport-provided systemd unit +install_systemd_unit() { + log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH} + log "Reloading unit files (systemctl daemon-reload)" + systemctl daemon-reload +} +# formats the arguments as a yaml list +get_yaml_list() { + name="${1}" + list="${2}" + indentation="${3}" + echo "${indentation}${name}:" + for item in ${list}; do + echo "${indentation}- ${item}" + done +} + +# installs the provided teleport config (for app service) +install_teleport_app_config() { + log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +app_service: + enabled: yes + apps: 
+ - name: "${APP_NAME}" + uri: "${APP_URI}" + public_addr: ${APP_PUBLIC_ADDR} +EOF +} +# installs the provided teleport config (for database service) +install_teleport_database_config() { + log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + + # This file is processed by `shellcheck` as part of the lint step + # It detects an issue because of un-set variables - $index and $line. This check is called SC2154. + # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it. + # When executing the script, those are no longer variables but actual values. + # shellcheck disable=SC2154 + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +db_service: + enabled: "yes" + resources: + - labels: +EOF +} +# installs the provided teleport config (for node service) +install_teleport_node_config() { + log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}" + ${TELEPORT_BINARY_DIR}/teleport node configure \ + --token ${JOIN_TOKEN} \ + ${JOIN_METHOD_FLAG} \ + --ca-pin ${CA_PINS} \ + --proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \ + "${LABELS_FLAG[@]}" \ + --output ${TELEPORT_CONFIG_PATH} +} +# checks whether the given host is running MacOS +is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi } +# checks whether teleport is already running on the host +is_running_teleport() { + check_exists_fatal pgrep + TELEPORT_PID=$(get_teleport_pid) + if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi +} +# checks whether the given host is running systemd as its init system +is_using_systemd() { if [ -d /run/systemd/system ]; then return 0; else 
return 1; fi } +# prints a warning if the host isn't running systemd +no_systemd_warning() { + log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits." + log_important "Please investigate an alternative way to keep Teleport running." + log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}" + log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit." + log_only + log_only "Run this command to start Teleport in future:" + log_only "$(get_teleport_start_command)" + log_only + log_only "------------------------------------------------------------------------" + log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |" + log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |" + log_only "------------------------------------------------------------------------" + log_only +} +# print a message giving the name of the node and a link to the docs +# gives some debugging instructions if the service didn't start successfully +print_welcome_message() { + log_only "" + if is_running_teleport; then + log_only "Teleport has been started." 
+ log_only "" + if is_using_systemd; then + log_only "View its status with 'sudo systemctl status teleport.service'" + log_only "View Teleport logs using 'sudo journalctl -u teleport.service'" + log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'" + log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'" + elif is_macos_host; then + log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + fi + log_only "" + log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'" + log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/" + else + log_important "The Teleport service was installed, but it does not appear to have started successfully." + if is_using_systemd; then + log_important "Check the Teleport service's status with 'systemctl status teleport.service'" + log_important "View Teleport logs with 'journalctl -u teleport.service'" + elif is_macos_host; then + log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + fi + log_important "Contact Teleport support for further assistance." + fi + log_only "" +} +# start teleport in foreground (when there's no systemd) +start_teleport_foreground() { + log "Starting Teleport in the foreground" + # shellcheck disable=SC2091 + $(get_teleport_start_command) +} +# start teleport via launchd (after installing config) +start_teleport_launchd() { + log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots." 
+ launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist + sleep ${ALIVE_CHECK_DELAY} +} +# start teleport via systemd (after installing unit) +start_teleport_systemd() { + log "Starting Teleport via systemd. It will automatically be started whenever the system reboots." + systemctl enable teleport.service + systemctl start teleport.service + sleep ${ALIVE_CHECK_DELAY} +} +# checks whether teleport binaries exist on the host +teleport_binaries_exist() { + for BINARY_NAME in teleport tctl tsh; do + if [ -f ${TELEPORT_BINARY_DIR}/${BINARY_NAME} ]; then return 0; else return 1; fi + done +} +# checks whether a teleport config exists on the host +teleport_config_exists() { if [ -f ${TELEPORT_CONFIG_PATH} ]; then return 0; else return 1; fi; } +# checks whether a teleport data dir exists on the host +teleport_datadir_exists() { if [ -d ${TELEPORT_DATA_DIR} ]; then return 0; else return 1; fi; } + +# error out if any required values are not set +check_set TELEPORT_VERSION +check_set TARGET_HOSTNAME +check_set TARGET_PORT +check_set JOIN_TOKEN +check_set CA_PIN_HASHES +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + check_set APP_NAME + check_set APP_URI + check_set APP_PUBLIC_ADDR +fi + +### +# main script starts here +### +# check connectivity to teleport server/port +if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then + log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check" +else + log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})" + if ! 
check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then + # if we don't have a connectivity test method assigned, we know we couldn't run the test + if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then + log "Couldn't find nc, telnet or /dev/tcp to do a connection test" + log "Going to blindly continue without testing connectivity" + else + log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}" + log_important "This issue will need to be fixed before the script can continue." + log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script." + exit 1 + fi + else + log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good" + fi +fi + +# use OSTYPE variable to figure out host type/arch +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + # linux host, now detect arch + TELEPORT_BINARY_TYPE="linux" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "armv7l" ]]; then + TELEPORT_ARCH="arm" + elif [[ ${ARCH} == "aarch64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + elif [[ ${ARCH} == "i686" ]]; then + TELEPORT_ARCH="386" + else + log_important "Error: cannot detect architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}" + # if the download format is already set, we have no need to detect distro + if [[ ${TELEPORT_FORMAT} == "" ]]; then + # detect distro + # if /etc/os-release doesn't exist, we need to use some other logic + if [ ! 
-f /etc/os-release ]; then + if [ -f /etc/centos-release ]; then + if grep -q 'CentOS release 6' /etc/centos-release; then + log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]" + log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low." + exit 1 + fi + elif [ -f /etc/redhat-release ]; then + if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then + log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low." + exit 1 + elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then + log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low." + exit 1 + fi + fi + # use ID_LIKE value from /etc/os-release (if set) + # this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc + else + check_exists_fatal cut + DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true + if [[ ${DISTRO_TYPE} == "" ]]; then + # use exact ID value from /etc/os-release if ID_LIKE is not set + DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2) + fi + if [[ ${DISTRO_TYPE} =~ "debian" ]]; then + TELEPORT_FORMAT="deb" + elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then + TELEPORT_FORMAT="rpm" + else + log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer" + TELEPORT_FORMAT="tarball" + fi + fi + log "Detected distro type: ${DISTRO_TYPE}" + #suse, also identified as sles, uses a different path for its systemd than other distro types like ubuntu + if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then + SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service" + fi + fi 
+elif [[ "${OSTYPE}" == "darwin"* ]]; then + # macos host, now detect arch + TELEPORT_BINARY_TYPE="darwin" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "arm64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + else + log_important "Error: unsupported architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}" + TELEPORT_FORMAT="tarball" +else + log_important "Error - unsupported platform: ${OSTYPE}" + exit 1 +fi +log "Using Teleport distribution: ${TELEPORT_FORMAT}" + +# create temporary directory and exit cleanup logic +TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX) +log "Created temp dir ${TEMP_DIR}" +pushd "${TEMP_DIR}" >/dev/null 2>&1 + +finish() { + popd >/dev/null 2>&1 + rm -rf "${TEMP_DIR}" +} +trap finish EXIT + +# optional format override (mostly for testing) +if [[ ${OVERRIDE_FORMAT} != "" ]]; then + TELEPORT_FORMAT="${OVERRIDE_FORMAT}" + log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}" +fi + +# check whether teleport is running already +# if it is, we exit gracefully with an error +if is_running_teleport; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + TELEPORT_PID=$(get_teleport_pid) + log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})" + log_cleanup_message + exit 1 + else + log "Ignoring is_running_teleport as requested" + fi +fi + +# check for existing config file +if teleport_config_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}." 
+ log_cleanup_message + exit 1 + else + log "Ignoring teleport_config_exists as requested" + fi +fi + +# check for existing data directory +if teleport_datadir_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_datadir_exists as requested" + fi +fi + +# check for existing binaries +if teleport_binaries_exist; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_binaries_exist as requested" + fi +fi + +install_from_file() { + # select correct URL/installation method based on distro + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz" + + # check that needed tools are installed + check_exists_fatal curl tar + # download tarball + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # extract tarball + tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}" + # install binaries to /usr/local/bin + for BINARY in ${TELEPORT_BINARY_LIST}; do + ${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/" + done + elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then + # convert teleport arch to deb arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + DEB_ARCH="amd64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + DEB_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + DEB_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + DEB_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb" + check_deb_not_already_installed + # check that 
needed tools are installed + check_exists_fatal curl dpkg + # download deb and register cleanup operation + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install deb + log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}" + dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then + # convert teleport arch to rpm arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + RPM_ARCH="x86_64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + RPM_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + RPM_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + RPM_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm" + check_rpm_not_already_installed + # check for package managers + if check_exists dnf; then + log "Found 'dnf' package manager, using it" + PACKAGE_MANAGER_COMMAND="dnf -y install" + elif check_exists yum; then + log "Found 'yum' package manager, using it" + PACKAGE_MANAGER_COMMAND="yum -y localinstall" + else + PACKAGE_MANAGER_COMMAND="" + log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead" + fi + # check that needed tools are installed + check_exists_fatal curl + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install with package manager if available + if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then + log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}" + # install rpm with package manager + ${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # use rpm if we couldn't find a package manager + else + # install RPM (in upgrade mode) + log "Using rpm to install 
${TEMP_DIR}/${DOWNLOAD_FILENAME}" + rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + fi + else + log_important "Can't figure out what Teleport format to use" + exit 1 + fi +} + +install_from_repo() { + if [[ "${REPO_CHANNEL}" == "" ]]; then + # By default, use the current version's channel. + REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}" + fi + + # Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + PACKAGE_LIST=$(package_list) + if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then + # old versions of ubuntu require that keys get added by `apt-key add`, without + # adding the key apt shows a key signing error when installing teleport. + if [[ + ($ID == "ubuntu" && $VERSION_ID == "16.04") || \ + ($ID == "debian" && $VERSION_ID == "9" ) + ]]; then + apt install apt-transport-https gnupg -y + curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add - + echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + else + curl -fsSL https://apt.releases.teleport.dev/gpg \ + -o /usr/share/keyrings/teleport-archive-keyring.asc + echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \ + https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + fi + apt-get update + apt-get install -y ${PACKAGE_LIST} + elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then + if [ "$ID" = "rhel" ]; then + VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version + fi + yum install -y yum-utils + yum-config-manager --add-repo \ + "$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")" + + # Remove metadata cache to prevent cache from other channel (eg, prior version) + # See: https://github.com/gravitational/teleport/issues/22581 + 
yum --disablerepo="*" --enablerepo="teleport" clean metadata + + yum install -y ${PACKAGE_LIST} + else + echo "Unsupported distro: $ID" + exit 1 + fi +} + +# package_list returns the list of packages to install. +# The list of packages can be fed into yum or apt because they already have the expected format when pinning versions. +package_list() { + TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME} + TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater" + + if [[ "${TELEPORT_FORMAT}" == "deb" ]]; then + TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}" + + elif [[ "${TELEPORT_FORMAT}" == "rpm" ]]; then + TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}" + TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + fi + + PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION} + # (warning): This expression is constant. Did you forget the $ on a variable? + # Disabling the warning above because expression is templated. + # shellcheck disable=SC2050 + if is_using_systemd && [[ "false" == "true" ]]; then + # Teleport Updater requires systemd. + PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}" + fi + echo ${PACKAGE_LIST} +} + +is_repo_available() { + if [[ "${OSTYPE}" != "linux-gnu" ]]; then + return 1 + fi + + # Populate $ID, $VERSION_ID and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + # The following distros+version have a Teleport repository to install from. + case "${ID}-${VERSION_ID}" in + ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \ + debian-9* | debian-10* | debian-11* | \ + rhel-7* | rhel-8* | rhel-9* | \ + centos-7* | centos-8* | centos-9* | \ + amzn-2 | amzn-2023) + return 0;; + esac + + return 1 +} + +if is_repo_available; then + log "Installing repo for distro $ID." + install_from_repo +else + log "Installing from binary file." 
+ install_from_file +fi + +# check that teleport binary can be found and runs +if ! check_teleport_binary; then + log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected." + log_important "This usually means that there was an error during installation." + log_important "Check this log for obvious signs of error and contact Teleport support" + log_important "for further assistance." + exit 1 +fi + +# install teleport config +# check the mode and write the appropriate config type +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + install_teleport_app_config +elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then + install_teleport_database_config +else + install_teleport_node_config +fi + + +# Used to track whether a Teleport agent was installed using this method. +export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true" + +# install systemd unit if applicable (linux hosts) +if is_using_systemd; then + log "Host is using systemd" + # we only need to manually install the systemd config if teleport was installed via tarball + # all other packages will deploy it automatically + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + install_systemd_unit + fi + start_teleport_systemd + print_welcome_message +# install launchd config on MacOS hosts +elif is_macos_host; then + log "Host is running MacOS" + install_launchd_config + start_teleport_launchd + print_welcome_message +# not a MacOS host and no systemd available, print a warning +# and temporarily start Teleport in the foreground +else + log "Host does not appear to be using systemd" + no_systemd_warning + start_teleport_foreground +fi + diff --git a/ansible/00_old/install-node-ubuntu.sh b/ansible/00_old/install-node-ubuntu.sh new file mode 100644 index 0000000..24cd8d3 --- /dev/null +++ b/ansible/00_old/install-node-ubuntu.sh @@ -0,0 +1,995 @@ +#!/bin/bash +set -euo pipefail +SCRIPT_NAME="teleport-installer" + +# default values +ALIVE_CHECK_DELAY=3 +CONNECTIVITY_TEST_METHOD="" +COPY_COMMAND="cp" 
+DISTRO_TYPE="" +IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}" +LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons" +LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)" +MACOS_STDERR_LOG="/var/log/teleport-stderr.log" +MACOS_STDOUT_LOG="/var/log/teleport-stdout.log" +SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service" +TARGET_PORT_DEFAULT=443 +TELEPORT_ARCHIVE_PATH='teleport' +TELEPORT_BINARY_DIR="/usr/local/bin" +TELEPORT_BINARY_LIST="teleport tctl tsh" +TELEPORT_CONFIG_PATH="/etc/teleport.yaml" +TELEPORT_DATA_DIR="/var/lib/teleport" +TELEPORT_DOCS_URL="https://goteleport.com/docs/" +TELEPORT_FORMAT="" + +# initialise variables (because set -u disallows unbound variables) +f="" +l="" +DISABLE_TLS_VERIFICATION=false +NODENAME=$(hostname) +IGNORE_CHECKS=false +OVERRIDE_FORMAT="" +QUIET=false +APP_INSTALL_DECISION="" +INTERACTIVE=false + +# the default value of each variable is a templatable Go value so that it can +# optionally be replaced by the server before the script is served up +TELEPORT_VERSION='13.3.4' +TELEPORT_PACKAGE_NAME='teleport' +REPO_CHANNEL='' +TARGET_HOSTNAME='teleport.access.datasaker.io' +TARGET_PORT='443' +JOIN_TOKEN='b0a997e4c3c200b8152a3f3025548189' +JOIN_METHOD='' +JOIN_METHOD_FLAG="" +[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}" + +# inject labels into the configuration +LABELS='teleport.internal/resource-id=89a1a61b-54f4-4e42-94d9-7ab2b1c1c847' +LABELS_FLAG=() +[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}") + +# When all stanza generators have been updated to use the new +# `teleport configure` commands CA_PIN_HASHES can be removed along +# with the script passing it in in `join_tokens.go`. 
+CA_PIN_HASHES='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8' +CA_PINS='sha256:941164dbcfedbe05de067f5ce14d4c4e6a0523c070f0bdb3959b01ba452b15f8' +ARG_CA_PIN_HASHES="" +APP_INSTALL_MODE='false' +APP_NAME='' +APP_URI='' +DB_INSTALL_MODE='false' + +# usage message +# shellcheck disable=SC2086 +usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; } +while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do + case "${o}" in + v) TELEPORT_VERSION=${OPTARG};; + h) TARGET_HOSTNAME=${OPTARG};; + p) TARGET_PORT=${OPTARG};; + j) JOIN_TOKEN=${OPTARG};; + c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";; + f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;; + q) QUIET=true;; + l) l=${OPTARG};; + i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";; + k) DISABLE_TLS_VERIFICATION=true;; + a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};; + u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};; + *) usage;; + esac +done +shift $((OPTIND-1)) + +if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then + CA_PIN_HASHES="${ARG_CA_PIN_HASHES}" +fi + +# function to construct a go template variable +# go's template parser is a bit finicky, so we dynamically build the value one character at a time +construct_go_template() { + OUTPUT="{" + OUTPUT+="{" + OUTPUT+="." + OUTPUT+="${1}" + OUTPUT+="}" + OUTPUT+="}" + echo "${OUTPUT}" +} + +# check whether we are root, exit if not +assert_running_as_root() { + if ! [ "$(id -u)" = 0 ]; then + echo "This script must be run as root." 1>&2 + exit 1 + fi +} + +# function to check whether variables are either blank or set to the default go template value +# (because they haven't been set by the go script generator or a command line argument) +# returns 1 if the variable is set to a default/zero value +# returns 0 otherwise (i.e. 
it needs to be set interactively) +check_variable() { + VARIABLE_VALUE="${!1}" + GO_TEMPLATE_NAME=$(construct_go_template "${2}") + if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then + return 1 + fi + return 0 +} + +# function to check whether a provided value is "truthy" i.e. it looks like you're trying to say "yes" +is_truthy() { + declare -a TRUTHY_VALUES + TRUTHY_VALUES=("y" "Y" "yes" "YES" "ye" "YE" "yep" "YEP" "ya" "YA") + CHECK_VALUE="$1" + for ARRAY_VALUE in "${TRUTHY_VALUES[@]}"; do [[ "${CHECK_VALUE}" == "${ARRAY_VALUE}" ]] && return 0; done + return 1 +} + +# function to read input until the value you get is non-empty +read_nonblank_input() { + INPUT="" + VARIABLE_TO_ASSIGN="$1" + shift + PROMPT="$*" + until [[ "${INPUT}" != "" ]]; do + echo -n "${PROMPT}" + read -r INPUT + done + printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}" +} + +# error if we're not root +assert_running_as_root + +# set/read values interactively if not provided +# users will be prompted to enter their own value if all the following are true: +# - the current value is blank, or equal to the default Go template value +# - the value has not been provided by command line argument +! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): " +! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: " +! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; } +! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: " +! 
check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): " +[ -n "${f}" ] && OVERRIDE_FORMAT=${f} +[ -n "${l}" ] && LOG_FILENAME=${l} +# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already), +# prompt the user to choose whether to enable app_service +if [[ "${INTERACTIVE}" == "true" ]]; then + if ! check_variable APP_INSTALL_MODE appInstallMode; then + APP_INSTALL_MODE="false" + echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] " + read -r APP_INSTALL_DECISION + if is_truthy "${APP_INSTALL_DECISION}"; then + APP_INSTALL_MODE="true" + fi + fi +fi +# prompt for extra needed values if we're running in app service mode +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + ! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): " + ! 
check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): " + # generate app public addr by concatenating values + APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}" +fi + +# set default target port if value not provided +if [[ "${TARGET_PORT}" == "" ]]; then + TARGET_PORT=${TARGET_PORT_DEFAULT} +fi + +# clear log file if provided +if [[ "${LOG_FILENAME}" != "" ]]; then + if [ -f "${LOG_FILENAME}" ]; then + echo -n "" > "${LOG_FILENAME}" + fi +fi + +# log functions +log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; } +log() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*" + if [[ ${QUIET} != "true" ]]; then + echo "${LOG_LINE}" + fi + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line with no timestamp or starting data, always prints +log_only() { + LOG_LINE="$*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line by itself as a header +log_header() { + LOG_LINE="$*" + echo "" + echo "${LOG_LINE}" + echo "" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# important log lines, print even when -q (quiet) is passed +log_important() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +log_cleanup_message() { + log_only "This script does not overwrite any existing settings or Teleport installations." 
+ log_only "Please clean up by running any of the following steps as necessary:" + log_only "- stop any running Teleport processes" + log_only " - pkill -f teleport" + log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself" + log_only " - rm -rf ${TELEPORT_DATA_DIR}" + log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}" + log_only " - rm -f ${TELEPORT_CONFIG_PATH}" + log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}" + for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done + log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}" + log_only "Run this installer again when done." + log_only +} + +# other functions +# check whether a named program exists +check_exists() { NAME=$1; if type "${NAME}" >/dev/null 2>&1; then return 0; else return 1; fi; } +# checks for the existence of a list of named binaries and exits with error if any of them don't exist +check_exists_fatal() { + for TOOL in "$@"; do + if ! 
check_exists "${TOOL}"; then + log_important "Error: cannot find ${TOOL} - it needs to be installed" + exit 1 + fi + done +} +# check connectivity to the given host/port and make a request to see if Teleport is listening +# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return +# values aren't really a thing that exists in bash +check_connectivity() { + HOST=$1 + PORT=$2 + # check with nc + if check_exists nc; then + CONNECTIVITY_TEST_METHOD="nc" + if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc, check with telnet + elif check_exists telnet; then + CONNECTIVITY_TEST_METHOD="telnet" + if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc or telnet, try and use /dev/tcp + elif [ -f /dev/tcp ]; then + CONNECTIVITY_TEST_METHOD="/dev/tcp" + if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi + else + return 255 + fi +} +# check whether a teleport DEB is already installed and exit with error if so +check_deb_not_already_installed() { + check_exists_fatal dpkg awk + DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true) + if [[ ${DEB_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})." + log_important "You will need to remove that package before using this script." + exit 1 + fi +} +# check whether a teleport RPM is already installed and exit with error if so +check_rpm_not_already_installed() { + check_exists_fatal rpm + RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true) + if [[ ${RPM_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})." + log_important "You will need to remove that package before using this script." 
+ exit 1 + fi +} +# function to check if given variable is set +check_set() { + CHECK_KEY=${1} || true + CHECK_VALUE=${!1} || true + if [[ "${CHECK_VALUE}" == "" ]]; then + log "Required variable ${CHECK_KEY} is not set" + exit 1 + else + log "${CHECK_KEY}: ${CHECK_VALUE}" + fi +} +# checks that teleport binary can be found in path and runs 'teleport version' +check_teleport_binary() { + FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version) + if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then + log "Cannot find Teleport binary" + return 1 + else + log "Found: ${FOUND_TELEPORT_VERSION}"; + return 0 + fi +} +# wrapper to download with curl +download() { + URL=$1 + OUTPUT_PATH=$2 + CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5" + # optionally allow disabling of TLS verification (can be useful on older distros + # which often have an out-of-date set of CA certificate bundle which won't validate) + if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then + CURL_COMMAND+=" -k" + fi + log "Running ${CURL_COMMAND} ${URL}" + log "Downloading to ${OUTPUT_PATH}" + # handle errors with curl + if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then + log_important "curl error downloading ${URL}" + log "On an older OS, this may be related to the CA certificate bundle being too old." + log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!" + exit 1 + fi + # check that the file has a non-zero size as an extra validation + check_exists_fatal wc xargs + FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)" + if [ "${FILE_SIZE}" -eq 0 ]; then + log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue." 
+ exit 1 + else + log "Downloaded file size: ${FILE_SIZE} bytes" + fi + # if we have a hashing utility installed, also download and validate the checksum + SHA_COMMAND="" + # shasum is installed by default on MacOS and some distros + if check_exists shasum; then + SHA_COMMAND="shasum -a 256" + # sha256sum is installed by default in some other distros + elif check_exists sha256sum; then + SHA_COMMAND="sha256sum" + fi + if [[ "${SHA_COMMAND}" != "" ]]; then + log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file" + SHA_URL="${URL}.sha256" + SHA_PATH="${OUTPUT_PATH}.sha256" + ${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}" + if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then + log "The downloaded file's checksum validated correctly" + else + SHA_EXPECTED=$(cat "${SHA_PATH}") + SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}") + if check_exists awk; then + SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}') + SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}') + fi + log_important "Checksum of the downloaded file did not validate correctly" + log_important "Expected: ${SHA_EXPECTED}" + log_important "Got: ${SHA_ACTUAL}" + log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support." 
+ exit 1 + fi + else + log "shasum/sha256sum utilities not found, will skip checksum validation" + fi +} +# gets the filename from a full path (https://target.site/path/to/file.tar.gz -> file.tar.gz) +get_download_filename() { echo "${1##*/}"; } +# gets the pid of any running teleport process (and converts newlines to spaces) +get_teleport_pid() { + check_exists_fatal pgrep xargs + pgrep teleport | xargs echo +} +# returns a command which will start teleport using the config +get_teleport_start_command() { + echo "${TELEPORT_BINARY_DIR}/teleport start --config=${TELEPORT_CONFIG_PATH}" +} +# installs the teleport-provided launchd config +install_launchd_config() { + log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist +} +# installs the teleport-provided systemd unit +install_systemd_unit() { + log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH} + log "Reloading unit files (systemctl daemon-reload)" + systemctl daemon-reload +} +# formats the arguments as a yaml list +get_yaml_list() { + name="${1}" + list="${2}" + indentation="${3}" + echo "${indentation}${name}:" + for item in ${list}; do + echo "${indentation}- ${item}" + done +} + +# installs the provided teleport config (for app service) +install_teleport_app_config() { + log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +app_service: + enabled: yes + apps: 
+ - name: "${APP_NAME}" + uri: "${APP_URI}" + public_addr: ${APP_PUBLIC_ADDR} +EOF +} +# installs the provided teleport config (for database service) +install_teleport_database_config() { + log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + + # This file is processed by `shellschek` as part of the lint step + # It detects an issue because of un-set variables - $index and $line. This check is called SC2154. + # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it. + # When executing the script, those are no long variables but actual values. + # shellcheck disable=SC2154 + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +db_service: + enabled: "yes" + resources: + - labels: +EOF +} +# installs the provided teleport config (for node service) +install_teleport_node_config() { + log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}" + ${TELEPORT_BINARY_DIR}/teleport node configure \ + --token ${JOIN_TOKEN} \ + ${JOIN_METHOD_FLAG} \ + --ca-pin ${CA_PINS} \ + --proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \ + "${LABELS_FLAG[@]}" \ + --output ${TELEPORT_CONFIG_PATH} +} +# checks whether the given host is running MacOS +is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi } +# checks whether teleport is already running on the host +is_running_teleport() { + check_exists_fatal pgrep + TELEPORT_PID=$(get_teleport_pid) + if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi +} +# checks whether the given host is running systemd as its init system +is_using_systemd() { if [ -d /run/systemd/system ]; then return 0; else 
return 1; fi } +# prints a warning if the host isn't running systemd +no_systemd_warning() { + log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits." + log_important "Please investigate an alternative way to keep Teleport running." + log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}" + log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit." + log_only + log_only "Run this command to start Teleport in future:" + log_only "$(get_teleport_start_command)" + log_only + log_only "------------------------------------------------------------------------" + log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |" + log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |" + log_only "------------------------------------------------------------------------" + log_only +} +# print a message giving the name of the node and a link to the docs +# gives some debugging instructions if the service didn't start successfully +print_welcome_message() { + log_only "" + if is_running_teleport; then + log_only "Teleport has been started." 
+ log_only "" + if is_using_systemd; then + log_only "View its status with 'sudo systemctl status teleport.service'" + log_only "View Teleport logs using 'sudo journalctl -u teleport.service'" + log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'" + log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'" + elif is_macos_host; then + log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + fi + log_only "" + log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'" + log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/" + else + log_important "The Teleport service was installed, but it does not appear to have started successfully." + if is_using_systemd; then + log_important "Check the Teleport service's status with 'systemctl status teleport.service'" + log_important "View Teleport logs with 'journalctl -u teleport.service'" + elif is_macos_host; then + log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + fi + log_important "Contact Teleport support for further assistance." + fi + log_only "" +} +# start teleport in foreground (when there's no systemd) +start_teleport_foreground() { + log "Starting Teleport in the foreground" + # shellcheck disable=SC2091 + $(get_teleport_start_command) +} +# start teleport via launchd (after installing config) +start_teleport_launchd() { + log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots." 
+ launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist + sleep ${ALIVE_CHECK_DELAY} +} +# start teleport via systemd (after installing unit) +start_teleport_systemd() { + log "Starting Teleport via systemd. It will automatically be started whenever the system reboots." + systemctl enable teleport.service + systemctl start teleport.service + sleep ${ALIVE_CHECK_DELAY} +} +# checks whether teleport binaries exist on the host +teleport_binaries_exist() { + for BINARY_NAME in teleport tctl tsh; do + if [ -f ${TELEPORT_BINARY_DIR}/${BINARY_NAME} ]; then return 0; else return 1; fi + done +} +# checks whether a teleport config exists on the host +teleport_config_exists() { if [ -f ${TELEPORT_CONFIG_PATH} ]; then return 0; else return 1; fi; } +# checks whether a teleport data dir exists on the host +teleport_datadir_exists() { if [ -d ${TELEPORT_DATA_DIR} ]; then return 0; else return 1; fi; } + +# error out if any required values are not set +check_set TELEPORT_VERSION +check_set TARGET_HOSTNAME +check_set TARGET_PORT +check_set JOIN_TOKEN +check_set CA_PIN_HASHES +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + check_set APP_NAME + check_set APP_URI + check_set APP_PUBLIC_ADDR +fi + +### +# main script starts here +### +# check connectivity to teleport server/port +if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then + log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check" +else + log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})" + if ! 
check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then + # if we don't have a connectivity test method assigned, we know we couldn't run the test + if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then + log "Couldn't find nc, telnet or /dev/tcp to do a connection test" + log "Going to blindly continue without testing connectivity" + else + log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}" + log_important "This issue will need to be fixed before the script can continue." + log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script." + exit 1 + fi + else + log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good" + fi +fi + +# use OSTYPE variable to figure out host type/arch +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + # linux host, now detect arch + TELEPORT_BINARY_TYPE="linux" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "armv7l" ]]; then + TELEPORT_ARCH="arm" + elif [[ ${ARCH} == "aarch64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + elif [[ ${ARCH} == "i686" ]]; then + TELEPORT_ARCH="386" + else + log_important "Error: cannot detect architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}" + # if the download format is already set, we have no need to detect distro + if [[ ${TELEPORT_FORMAT} == "" ]]; then + # detect distro + # if /etc/os-release doesn't exist, we need to use some other logic + if [ ! 
-f /etc/os-release ]; then + if [ -f /etc/centos-release ]; then + if grep -q 'CentOS release 6' /etc/centos-release; then + log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]" + log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low." + exit 1 + fi + elif [ -f /etc/redhat-release ]; then + if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then + log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low." + exit 1 + elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then + log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low." + exit 1 + fi + fi + # use ID_LIKE value from /etc/os-release (if set) + # this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc + else + check_exists_fatal cut + DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true + if [[ ${DISTRO_TYPE} == "" ]]; then + # use exact ID value from /etc/os-release if ID_LIKE is not set + DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2) + fi + if [[ ${DISTRO_TYPE} =~ "debian" ]]; then + TELEPORT_FORMAT="deb" + elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then + TELEPORT_FORMAT="rpm" + else + log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer" + TELEPORT_FORMAT="tarball" + fi + fi + log "Detected distro type: ${DISTRO_TYPE}" + #suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu + if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then + SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service" + fi + fi 
+elif [[ "${OSTYPE}" == "darwin"* ]]; then + # macos host, now detect arch + TELEPORT_BINARY_TYPE="darwin" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "arm64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + else + log_important "Error: unsupported architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}" + TELEPORT_FORMAT="tarball" +else + log_important "Error - unsupported platform: ${OSTYPE}" + exit 1 +fi +log "Using Teleport distribution: ${TELEPORT_FORMAT}" + +# create temporary directory and exit cleanup logic +TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX) +log "Created temp dir ${TEMP_DIR}" +pushd "${TEMP_DIR}" >/dev/null 2>&1 + +finish() { + popd >/dev/null 2>&1 + rm -rf "${TEMP_DIR}" +} +trap finish EXIT + +# optional format override (mostly for testing) +if [[ ${OVERRIDE_FORMAT} != "" ]]; then + TELEPORT_FORMAT="${OVERRIDE_FORMAT}" + log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}" +fi + +# check whether teleport is running already +# if it is, we exit gracefully with an error +if is_running_teleport; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + TELEPORT_PID=$(get_teleport_pid) + log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})" + log_cleanup_message + exit 1 + else + log "Ignoring is_running_teleport as requested" + fi +fi + +# check for existing config file +if teleport_config_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}." 
+ log_cleanup_message + exit 1 + else + log "Ignoring teleport_config_exists as requested" + fi +fi + +# check for existing data directory +if teleport_datadir_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_datadir_exists as requested" + fi +fi + +# check for existing binaries +if teleport_binaries_exist; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_binaries_exist as requested" + fi +fi + +install_from_file() { + # select correct URL/installation method based on distro + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz" + + # check that needed tools are installed + check_exists_fatal curl tar + # download tarball + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # extract tarball + tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}" + # install binaries to /usr/local/bin + for BINARY in ${TELEPORT_BINARY_LIST}; do + ${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/" + done + elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then + # convert teleport arch to deb arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + DEB_ARCH="amd64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + DEB_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + DEB_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + DEB_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb" + check_deb_not_already_installed + # check that 
needed tools are installed + check_exists_fatal curl dpkg + # download deb and register cleanup operation + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install deb + log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}" + dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then + # convert teleport arch to rpm arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + RPM_ARCH="x86_64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + RPM_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + RPM_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + RPM_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm" + check_rpm_not_already_installed + # check for package managers + if check_exists dnf; then + log "Found 'dnf' package manager, using it" + PACKAGE_MANAGER_COMMAND="dnf -y install" + elif check_exists yum; then + log "Found 'yum' package manager, using it" + PACKAGE_MANAGER_COMMAND="yum -y localinstall" + else + PACKAGE_MANAGER_COMMAND="" + log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead" + fi + # check that needed tools are installed + check_exists_fatal curl + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install with package manager if available + if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then + log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}" + # install rpm with package manager + ${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # use rpm if we couldn't find a package manager + else + # install RPM (in upgrade mode) + log "Using rpm to install 
${TEMP_DIR}/${DOWNLOAD_FILENAME}" + rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + fi + else + log_important "Can't figure out what Teleport format to use" + exit 1 + fi +} + +install_from_repo() { + if [[ "${REPO_CHANNEL}" == "" ]]; then + # By default, use the current version's channel. + REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}" + fi + + # Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + PACKAGE_LIST=$(package_list) + if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then + # old versions of ubuntu require that keys get added by `apt-key add`, without + # adding the key apt shows a key signing error when installing teleport. + if [[ + ($ID == "ubuntu" && $VERSION_ID == "16.04") || \ + ($ID == "debian" && $VERSION_ID == "9" ) + ]]; then + apt install apt-transport-https gnupg -y + curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add - + echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + else + curl -fsSL https://apt.releases.teleport.dev/gpg \ + -o /usr/share/keyrings/teleport-archive-keyring.asc + echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \ + https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + fi + apt-get update + apt-get install -y ${PACKAGE_LIST} + elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then + if [ "$ID" = "rhel" ]; then + VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version + fi + yum install -y yum-utils + yum-config-manager --add-repo \ + "$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")" + + # Remove metadata cache to prevent cache from other channel (eg, prior version) + # See: https://github.com/gravitational/teleport/issues/22581 + 
yum --disablerepo="*" --enablerepo="teleport" clean metadata + + yum install -y ${PACKAGE_LIST} + else + echo "Unsupported distro: $ID" + exit 1 + fi +} + +# package_list returns the list of packages to install. +# The list of packages can be fed into yum or apt because they already have the expected format when pinning versions. +package_list() { + TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME} + TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater" + + if [[ "${TELEPORT_FORMAT}" == "deb" ]]; then + TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}" + + elif [[ "${TELEPORT_FORMAT}" == "rpm" ]]; then + TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}" + TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + fi + + PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION} + # (warning): This expression is constant. Did you forget the $ on a variable? + # Disabling the warning above because expression is templated. + # shellcheck disable=SC2050 + if is_using_systemd && [[ "false" == "true" ]]; then + # Teleport Updater requires systemd. + PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}" + fi + echo ${PACKAGE_LIST} +} + +is_repo_available() { + if [[ "${OSTYPE}" != "linux-gnu" ]]; then + return 1 + fi + + # Populate $ID, $VERSION_ID and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + # The following distros+version have a Teleport repository to install from. + case "${ID}-${VERSION_ID}" in + ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \ + debian-9* | debian-10* | debian-11* | \ + rhel-7* | rhel-8* | rhel-9* | \ + centos-7* | centos-8* | centos-9* | \ + amzn-2 | amzn-2023) + return 0;; + esac + + return 1 +} + +if is_repo_available; then + log "Installing repo for distro $ID." + install_from_repo +else + log "Installing from binary file." 
+ install_from_file +fi + +# check that teleport binary can be found and runs +if ! check_teleport_binary; then + log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected." + log_important "This usually means that there was an error during installation." + log_important "Check this log for obvious signs of error and contact Teleport support" + log_important "for further assistance." + exit 1 +fi + +# install teleport config +# check the mode and write the appropriate config type +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + install_teleport_app_config +elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then + install_teleport_database_config +else + install_teleport_node_config +fi + + +# Used to track whether a Teleport agent was installed using this method. +export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true" + +# install systemd unit if applicable (linux hosts) +if is_using_systemd; then + log "Host is using systemd" + # we only need to manually install the systemd config if teleport was installed via tarball + # all other packages will deploy it automatically + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + install_systemd_unit + fi + start_teleport_systemd + print_welcome_message +# install launchd config on MacOS hosts +elif is_macos_host; then + log "Host is running MacOS" + install_launchd_config + start_teleport_launchd + print_welcome_message +# not a MacOS host and no systemd available, print a warning +# and temporarily start Teleport in the foreground +else + log "Host does not appear to be using systemd" + no_systemd_warning + start_teleport_foreground +fi + diff --git a/ansible/00_old/install-node.sh b/ansible/00_old/install-node.sh new file mode 100644 index 0000000..f5136e6 --- /dev/null +++ b/ansible/00_old/install-node.sh @@ -0,0 +1,995 @@ +#!/bin/bash +set -euo pipefail +SCRIPT_NAME="teleport-installer" + +# default values +ALIVE_CHECK_DELAY=3 +CONNECTIVITY_TEST_METHOD="" +COPY_COMMAND="cp" +DISTRO_TYPE="" 
+IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}" +LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons" +LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)" +MACOS_STDERR_LOG="/var/log/teleport-stderr.log" +MACOS_STDOUT_LOG="/var/log/teleport-stdout.log" +SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service" +TARGET_PORT_DEFAULT=443 +TELEPORT_ARCHIVE_PATH='teleport' +TELEPORT_BINARY_DIR="/usr/local/bin" +TELEPORT_BINARY_LIST="teleport tctl tsh" +TELEPORT_CONFIG_PATH="/etc/teleport.yaml" +TELEPORT_DATA_DIR="/var/lib/teleport" +TELEPORT_DOCS_URL="https://goteleport.com/docs/" +TELEPORT_FORMAT="" + +# initialise variables (because set -u disallows unbound variables) +f="" +l="" +DISABLE_TLS_VERIFICATION=false +NODENAME=$(hostname) +IGNORE_CHECKS=false +OVERRIDE_FORMAT="" +QUIET=false +APP_INSTALL_DECISION="" +INTERACTIVE=false + +# the default value of each variable is a templatable Go value so that it can +# optionally be replaced by the server before the script is served up +TELEPORT_VERSION='13.3.4' +TELEPORT_PACKAGE_NAME='teleport' +REPO_CHANNEL='' +TARGET_HOSTNAME='teleport.datasaker.io' +TARGET_PORT='443' +JOIN_TOKEN='2df40c1ac8f47d7b155a92c134a77a84' +JOIN_METHOD='' +JOIN_METHOD_FLAG="" +[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}" + +# inject labels into the configuration +LABELS='teleport.internal/resource-id=0ec993a8-b1ec-4fa6-8fc5-4e73e3e5306e' +LABELS_FLAG=() +[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}") + +# When all stanza generators have been updated to use the new +# `teleport configure` commands CA_PIN_HASHES can be removed along +# with the script passing it in in `join_tokens.go`. 
+CA_PIN_HASHES='sha256:fcd4bcd57c9a2a7bd68c4140c2c46c0131cd31567c3e0c87e6b12258aa190836' +CA_PINS='sha256:fcd4bcd57c9a2a7bd68c4140c2c46c0131cd31567c3e0c87e6b12258aa190836' +ARG_CA_PIN_HASHES="" +APP_INSTALL_MODE='false' +APP_NAME='' +APP_URI='' +DB_INSTALL_MODE='false' + +# usage message +# shellcheck disable=SC2086 +usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; } +while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do + case "${o}" in + v) TELEPORT_VERSION=${OPTARG};; + h) TARGET_HOSTNAME=${OPTARG};; + p) TARGET_PORT=${OPTARG};; + j) JOIN_TOKEN=${OPTARG};; + c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";; + f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;; + q) QUIET=true;; + l) l=${OPTARG};; + i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";; + k) DISABLE_TLS_VERIFICATION=true;; + a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};; + u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};; + *) usage;; + esac +done +shift $((OPTIND-1)) + +if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then + CA_PIN_HASHES="${ARG_CA_PIN_HASHES}" +fi + +# function to construct a go template variable +# go's template parser is a bit finicky, so we dynamically build the value one character at a time +construct_go_template() { + OUTPUT="{" + OUTPUT+="{" + OUTPUT+="." + OUTPUT+="${1}" + OUTPUT+="}" + OUTPUT+="}" + echo "${OUTPUT}" +} + +# check whether we are root, exit if not +assert_running_as_root() { + if ! [ "$(id -u)" = 0 ]; then + echo "This script must be run as root." 1>&2 + exit 1 + fi +} + +# function to check whether variables are either blank or set to the default go template value +# (because they haven't been set by the go script generator or a command line argument) +# returns 1 if the variable is set to a default/zero value +# returns 0 otherwise (i.e. 
it needs to be set interactively) +check_variable() { + VARIABLE_VALUE="${!1}" + GO_TEMPLATE_NAME=$(construct_go_template "${2}") + if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then + return 1 + fi + return 0 +} + +# function to check whether a provided value is "truthy" i.e. it looks like you're trying to say "yes" +is_truthy() { + declare -a TRUTHY_VALUES + TRUTHY_VALUES=("y" "Y" "yes" "YES" "ye" "YE" "yep" "YEP" "ya" "YA") + CHECK_VALUE="$1" + for ARRAY_VALUE in "${TRUTHY_VALUES[@]}"; do [[ "${CHECK_VALUE}" == "${ARRAY_VALUE}" ]] && return 0; done + return 1 +} + +# function to read input until the value you get is non-empty +read_nonblank_input() { + INPUT="" + VARIABLE_TO_ASSIGN="$1" + shift + PROMPT="$*" + until [[ "${INPUT}" != "" ]]; do + echo -n "${PROMPT}" + read -r INPUT + done + printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}" +} + +# error if we're not root +assert_running_as_root + +# set/read values interactively if not provided +# users will be prompted to enter their own value if all the following are true: +# - the current value is blank, or equal to the default Go template value +# - the value has not been provided by command line argument +! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): " +! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: " +! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; } +! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: " +! 
check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): " +[ -n "${f}" ] && OVERRIDE_FORMAT=${f} +[ -n "${l}" ] && LOG_FILENAME=${l} +# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already), +# prompt the user to choose whether to enable app_service +if [[ "${INTERACTIVE}" == "true" ]]; then + if ! check_variable APP_INSTALL_MODE appInstallMode; then + APP_INSTALL_MODE="false" + echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] " + read -r APP_INSTALL_DECISION + if is_truthy "${APP_INSTALL_DECISION}"; then + APP_INSTALL_MODE="true" + fi + fi +fi +# prompt for extra needed values if we're running in app service mode +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + ! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): " + ! 
check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): " + # generate app public addr by concatenating values + APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}" +fi + +# set default target port if value not provided +if [[ "${TARGET_PORT}" == "" ]]; then + TARGET_PORT=${TARGET_PORT_DEFAULT} +fi + +# clear log file if provided +if [[ "${LOG_FILENAME}" != "" ]]; then + if [ -f "${LOG_FILENAME}" ]; then + echo -n "" > "${LOG_FILENAME}" + fi +fi + +# log functions +log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; } +log() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*" + if [[ ${QUIET} != "true" ]]; then + echo "${LOG_LINE}" + fi + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line with no timestamp or starting data, always prints +log_only() { + LOG_LINE="$*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line by itself as a header +log_header() { + LOG_LINE="$*" + echo "" + echo "${LOG_LINE}" + echo "" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# important log lines, print even when -q (quiet) is passed +log_important() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +log_cleanup_message() { + log_only "This script does not overwrite any existing settings or Teleport installations." 
+ log_only "Please clean up by running any of the following steps as necessary:" + log_only "- stop any running Teleport processes" + log_only " - pkill -f teleport" + log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself" + log_only " - rm -rf ${TELEPORT_DATA_DIR}" + log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}" + log_only " - rm -f ${TELEPORT_CONFIG_PATH}" + log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}" + for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done + log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}" + log_only "Run this installer again when done." + log_only +} + +# other functions +# check whether a named program exists +check_exists() { NAME=$1; if type "${NAME}" >/dev/null 2>&1; then return 0; else return 1; fi; } +# checks for the existence of a list of named binaries and exits with error if any of them don't exist +check_exists_fatal() { + for TOOL in "$@"; do + if ! 
check_exists "${TOOL}"; then + log_important "Error: cannot find ${TOOL} - it needs to be installed" + exit 1 + fi + done +} +# check connectivity to the given host/port and make a request to see if Teleport is listening +# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return +# values aren't really a thing that exists in bash +check_connectivity() { + HOST=$1 + PORT=$2 + # check with nc + if check_exists nc; then + CONNECTIVITY_TEST_METHOD="nc" + if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc, check with telnet + elif check_exists telnet; then + CONNECTIVITY_TEST_METHOD="telnet" + if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc or telnet, try and use /dev/tcp + elif [ -f /dev/tcp ]; then + CONNECTIVITY_TEST_METHOD="/dev/tcp" + if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi + else + return 255 + fi +} +# check whether a teleport DEB is already installed and exit with error if so +check_deb_not_already_installed() { + check_exists_fatal dpkg awk + DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true) + if [[ ${DEB_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})." + log_important "You will need to remove that package before using this script." + exit 1 + fi +} +# check whether a teleport RPM is already installed and exit with error if so +check_rpm_not_already_installed() { + check_exists_fatal rpm + RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true) + if [[ ${RPM_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})." + log_important "You will need to remove that package before using this script." 
+ exit 1 + fi +} +# function to check if given variable is set +check_set() { + CHECK_KEY=${1} || true + CHECK_VALUE=${!1} || true + if [[ "${CHECK_VALUE}" == "" ]]; then + log "Required variable ${CHECK_KEY} is not set" + exit 1 + else + log "${CHECK_KEY}: ${CHECK_VALUE}" + fi +} +# checks that teleport binary can be found in path and runs 'teleport version' +check_teleport_binary() { + FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version) + if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then + log "Cannot find Teleport binary" + return 1 + else + log "Found: ${FOUND_TELEPORT_VERSION}"; + return 0 + fi +} +# wrapper to download with curl +download() { + URL=$1 + OUTPUT_PATH=$2 + CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5" + # optionally allow disabling of TLS verification (can be useful on older distros + # which often have an out-of-date set of CA certificate bundle which won't validate) + if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then + CURL_COMMAND+=" -k" + fi + log "Running ${CURL_COMMAND} ${URL}" + log "Downloading to ${OUTPUT_PATH}" + # handle errors with curl + if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then + log_important "curl error downloading ${URL}" + log "On an older OS, this may be related to the CA certificate bundle being too old." + log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!" + exit 1 + fi + # check that the file has a non-zero size as an extra validation + check_exists_fatal wc xargs + FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)" + if [ "${FILE_SIZE}" -eq 0 ]; then + log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue." 
+ exit 1 + else + log "Downloaded file size: ${FILE_SIZE} bytes" + fi + # if we have a hashing utility installed, also download and validate the checksum + SHA_COMMAND="" + # shasum is installed by default on MacOS and some distros + if check_exists shasum; then + SHA_COMMAND="shasum -a 256" + # sha256sum is installed by default in some other distros + elif check_exists sha256sum; then + SHA_COMMAND="sha256sum" + fi + if [[ "${SHA_COMMAND}" != "" ]]; then + log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file" + SHA_URL="${URL}.sha256" + SHA_PATH="${OUTPUT_PATH}.sha256" + ${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}" + if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then + log "The downloaded file's checksum validated correctly" + else + SHA_EXPECTED=$(cat "${SHA_PATH}") + SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}") + if check_exists awk; then + SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}') + SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}') + fi + log_important "Checksum of the downloaded file did not validate correctly" + log_important "Expected: ${SHA_EXPECTED}" + log_important "Got: ${SHA_ACTUAL}" + log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support." 
+ exit 1 + fi + else + log "shasum/sha256sum utilities not found, will skip checksum validation" + fi +} +# gets the filename from a full path (https://target.site/path/to/file.tar.gz -> file.tar.gz) +get_download_filename() { echo "${1##*/}"; } +# gets the pid of any running teleport process (and converts newlines to spaces) +get_teleport_pid() { + check_exists_fatal pgrep xargs + pgrep teleport | xargs echo +} +# returns a command which will start teleport using the config +get_teleport_start_command() { + echo "${TELEPORT_BINARY_DIR}/teleport start --config=${TELEPORT_CONFIG_PATH}" +} +# installs the teleport-provided launchd config +install_launchd_config() { + log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist +} +# installs the teleport-provided systemd unit +install_systemd_unit() { + log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH} + log "Reloading unit files (systemctl daemon-reload)" + systemctl daemon-reload +} +# formats the arguments as a yaml list +get_yaml_list() { + name="${1}" + list="${2}" + indentation="${3}" + echo "${indentation}${name}:" + for item in ${list}; do + echo "${indentation}- ${item}" + done +} + +# installs the provided teleport config (for app service) +install_teleport_app_config() { + log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +app_service: + enabled: yes + apps: 
+ - name: "${APP_NAME}" + uri: "${APP_URI}" + public_addr: ${APP_PUBLIC_ADDR} +EOF +} +# installs the provided teleport config (for database service) +install_teleport_database_config() { + log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + + # This file is processed by `shellcheck` as part of the lint step + # It detects an issue because of un-set variables - $index and $line. This check is called SC2154. + # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it. + # When executing the script, those are no longer variables but actual values. + # shellcheck disable=SC2154 + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +db_service: + enabled: "yes" + resources: + - labels: +EOF +} +# installs the provided teleport config (for node service) +install_teleport_node_config() { + log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}" + ${TELEPORT_BINARY_DIR}/teleport node configure \ + --token ${JOIN_TOKEN} \ + ${JOIN_METHOD_FLAG} \ + --ca-pin ${CA_PINS} \ + --proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \ + "${LABELS_FLAG[@]}" \ + --output ${TELEPORT_CONFIG_PATH} +} +# checks whether the given host is running MacOS +is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi } +# checks whether teleport is already running on the host +is_running_teleport() { + check_exists_fatal pgrep + TELEPORT_PID=$(get_teleport_pid) + if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else 
return 1; fi } +# prints a warning if the host isn't running systemd +no_systemd_warning() { + log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits." + log_important "Please investigate an alternative way to keep Teleport running." + log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}" + log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit." + log_only + log_only "Run this command to start Teleport in future:" + log_only "$(get_teleport_start_command)" + log_only + log_only "------------------------------------------------------------------------" + log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |" + log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |" + log_only "------------------------------------------------------------------------" + log_only +} +# print a message giving the name of the node and a link to the docs +# gives some debugging instructions if the service didn't start successfully +print_welcome_message() { + log_only "" + if is_running_teleport; then + log_only "Teleport has been started." 
+ log_only "" + if is_using_systemd; then + log_only "View its status with 'sudo systemctl status teleport.service'" + log_only "View Teleport logs using 'sudo journalctl -u teleport.service'" + log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'" + log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'" + elif is_macos_host; then + log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + fi + log_only "" + log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'" + log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/" + else + log_important "The Teleport service was installed, but it does not appear to have started successfully." + if is_using_systemd; then + log_important "Check the Teleport service's status with 'systemctl status teleport.service'" + log_important "View Teleport logs with 'journalctl -u teleport.service'" + elif is_macos_host; then + log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + fi + log_important "Contact Teleport support for further assistance." + fi + log_only "" +} +# start teleport in foreground (when there's no systemd) +start_teleport_foreground() { + log "Starting Teleport in the foreground" + # shellcheck disable=SC2091 + $(get_teleport_start_command) +} +# start teleport via launchd (after installing config) +start_teleport_launchd() { + log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots." 
+ launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist + sleep ${ALIVE_CHECK_DELAY} +} +# start teleport via systemd (after installing unit) +start_teleport_systemd() { + log "Starting Teleport via systemd. It will automatically be started whenever the system reboots." + systemctl enable teleport.service + systemctl start teleport.service + sleep ${ALIVE_CHECK_DELAY} +} +# checks whether teleport binaries exist on the host +teleport_binaries_exist() { + for BINARY_NAME in teleport tctl tsh; do + if [ -f ${TELEPORT_BINARY_DIR}/${BINARY_NAME} ]; then return 0; else return 1; fi + done +} +# checks whether a teleport config exists on the host +teleport_config_exists() { if [ -f ${TELEPORT_CONFIG_PATH} ]; then return 0; else return 1; fi; } +# checks whether a teleport data dir exists on the host +teleport_datadir_exists() { if [ -d ${TELEPORT_DATA_DIR} ]; then return 0; else return 1; fi; } + +# error out if any required values are not set +check_set TELEPORT_VERSION +check_set TARGET_HOSTNAME +check_set TARGET_PORT +check_set JOIN_TOKEN +check_set CA_PIN_HASHES +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + check_set APP_NAME + check_set APP_URI + check_set APP_PUBLIC_ADDR +fi + +### +# main script starts here +### +# check connectivity to teleport server/port +if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then + log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check" +else + log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})" + if ! 
check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then + # if we don't have a connectivity test method assigned, we know we couldn't run the test + if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then + log "Couldn't find nc, telnet or /dev/tcp to do a connection test" + log "Going to blindly continue without testing connectivity" + else + log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}" + log_important "This issue will need to be fixed before the script can continue." + log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script." + exit 1 + fi + else + log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good" + fi +fi + +# use OSTYPE variable to figure out host type/arch +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + # linux host, now detect arch + TELEPORT_BINARY_TYPE="linux" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "armv7l" ]]; then + TELEPORT_ARCH="arm" + elif [[ ${ARCH} == "aarch64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + elif [[ ${ARCH} == "i686" ]]; then + TELEPORT_ARCH="386" + else + log_important "Error: cannot detect architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}" + # if the download format is already set, we have no need to detect distro + if [[ ${TELEPORT_FORMAT} == "" ]]; then + # detect distro + # if /etc/os-release doesn't exist, we need to use some other logic + if [ ! 
-f /etc/os-release ]; then + if [ -f /etc/centos-release ]; then + if grep -q 'CentOS release 6' /etc/centos-release; then + log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]" + log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low." + exit 1 + fi + elif [ -f /etc/redhat-release ]; then + if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then + log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low." + exit 1 + elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then + log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low." + exit 1 + fi + fi + # use ID_LIKE value from /etc/os-release (if set) + # this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc + else + check_exists_fatal cut + DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true + if [[ ${DISTRO_TYPE} == "" ]]; then + # use exact ID value from /etc/os-release if ID_LIKE is not set + DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2) + fi + if [[ ${DISTRO_TYPE} =~ "debian" ]]; then + TELEPORT_FORMAT="deb" + elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then + TELEPORT_FORMAT="rpm" + else + log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer" + TELEPORT_FORMAT="tarball" + fi + fi + log "Detected distro type: ${DISTRO_TYPE}" + #suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu + if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then + SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service" + fi + fi 
+elif [[ "${OSTYPE}" == "darwin"* ]]; then + # macos host, now detect arch + TELEPORT_BINARY_TYPE="darwin" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "arm64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + else + log_important "Error: unsupported architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}" + TELEPORT_FORMAT="tarball" +else + log_important "Error - unsupported platform: ${OSTYPE}" + exit 1 +fi +log "Using Teleport distribution: ${TELEPORT_FORMAT}" + +# create temporary directory and exit cleanup logic +TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX) +log "Created temp dir ${TEMP_DIR}" +pushd "${TEMP_DIR}" >/dev/null 2>&1 + +finish() { + popd >/dev/null 2>&1 + rm -rf "${TEMP_DIR}" +} +trap finish EXIT + +# optional format override (mostly for testing) +if [[ ${OVERRIDE_FORMAT} != "" ]]; then + TELEPORT_FORMAT="${OVERRIDE_FORMAT}" + log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}" +fi + +# check whether teleport is running already +# if it is, we exit gracefully with an error +if is_running_teleport; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + TELEPORT_PID=$(get_teleport_pid) + log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})" + log_cleanup_message + exit 1 + else + log "Ignoring is_running_teleport as requested" + fi +fi + +# check for existing config file +if teleport_config_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}." 
+ log_cleanup_message + exit 1 + else + log "Ignoring teleport_config_exists as requested" + fi +fi + +# check for existing data directory +if teleport_datadir_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_datadir_exists as requested" + fi +fi + +# check for existing binaries +if teleport_binaries_exist; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_binaries_exist as requested" + fi +fi + +install_from_file() { + # select correct URL/installation method based on distro + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz" + + # check that needed tools are installed + check_exists_fatal curl tar + # download tarball + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # extract tarball + tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}" + # install binaries to /usr/local/bin + for BINARY in ${TELEPORT_BINARY_LIST}; do + ${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/" + done + elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then + # convert teleport arch to deb arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + DEB_ARCH="amd64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + DEB_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + DEB_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + DEB_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb" + check_deb_not_already_installed + # check that 
needed tools are installed + check_exists_fatal curl dpkg + # download deb and register cleanup operation + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install deb + log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}" + dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then + # convert teleport arch to rpm arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + RPM_ARCH="x86_64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + RPM_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + RPM_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + RPM_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm" + check_rpm_not_already_installed + # check for package managers + if check_exists dnf; then + log "Found 'dnf' package manager, using it" + PACKAGE_MANAGER_COMMAND="dnf -y install" + elif check_exists yum; then + log "Found 'yum' package manager, using it" + PACKAGE_MANAGER_COMMAND="yum -y localinstall" + else + PACKAGE_MANAGER_COMMAND="" + log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead" + fi + # check that needed tools are installed + check_exists_fatal curl + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install with package manager if available + if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then + log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}" + # install rpm with package manager + ${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # use rpm if we couldn't find a package manager + else + # install RPM (in upgrade mode) + log "Using rpm to install 
${TEMP_DIR}/${DOWNLOAD_FILENAME}" + rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + fi + else + log_important "Can't figure out what Teleport format to use" + exit 1 + fi +} + +install_from_repo() { + if [[ "${REPO_CHANNEL}" == "" ]]; then + # By default, use the current version's channel. + REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}" + fi + + # Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + PACKAGE_LIST=$(package_list) + if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then + # old versions of ubuntu require that keys get added by `apt-key add`, without + # adding the key apt shows a key signing error when installing teleport. + if [[ + ($ID == "ubuntu" && $VERSION_ID == "16.04") || \ + ($ID == "debian" && $VERSION_ID == "9" ) + ]]; then + apt install apt-transport-https gnupg -y + curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add - + echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + else + curl -fsSL https://apt.releases.teleport.dev/gpg \ + -o /usr/share/keyrings/teleport-archive-keyring.asc + echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \ + https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + fi + apt-get update + apt-get install -y ${PACKAGE_LIST} + elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then + if [ "$ID" = "rhel" ]; then + VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version + fi + yum install -y yum-utils + yum-config-manager --add-repo \ + "$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")" + + # Remove metadata cache to prevent cache from other channel (eg, prior version) + # See: https://github.com/gravitational/teleport/issues/22581 + 
yum --disablerepo="*" --enablerepo="teleport" clean metadata + + yum install -y ${PACKAGE_LIST} + else + echo "Unsupported distro: $ID" + exit 1 + fi +} + +# package_list returns the list of packages to install. +# The list of packages can be fed into yum or apt because they already have the expected format when pinning versions. +package_list() { + TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME} + TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater" + + if [[ "${TELEPORT_FORMAT}" == "deb" ]]; then + TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}" + + elif [[ "${TELEPORT_FORMAT}" == "rpm" ]]; then + TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}" + TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + fi + + PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION} + # (warning): This expression is constant. Did you forget the $ on a variable? + # Disabling the warning above because expression is templated. + # shellcheck disable=SC2050 + if is_using_systemd && [[ "false" == "true" ]]; then + # Teleport Updater requires systemd. + PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}" + fi + echo ${PACKAGE_LIST} +} + +is_repo_available() { + if [[ "${OSTYPE}" != "linux-gnu" ]]; then + return 1 + fi + + # Populate $ID, $VERSION_ID and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + # The following distros+version have a Teleport repository to install from. + case "${ID}-${VERSION_ID}" in + ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \ + debian-9* | debian-10* | debian-11* | \ + rhel-7* | rhel-8* | rhel-9* | \ + centos-7* | centos-8* | centos-9* | \ + amzn-2 | amzn-2023) + return 0;; + esac + + return 1 +} + +if is_repo_available; then + log "Installing repo for distro $ID." + install_from_repo +else + log "Installing from binary file." 
+ install_from_file +fi + +# check that teleport binary can be found and runs +if ! check_teleport_binary; then + log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected." + log_important "This usually means that there was an error during installation." + log_important "Check this log for obvious signs of error and contact Teleport support" + log_important "for further assistance." + exit 1 +fi + +# install teleport config +# check the mode and write the appropriate config type +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + install_teleport_app_config +elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then + install_teleport_database_config +else + install_teleport_node_config +fi + + +# Used to track whether a Teleport agent was installed using this method. +export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true" + +# install systemd unit if applicable (linux hosts) +if is_using_systemd; then + log "Host is using systemd" + # we only need to manually install the systemd config if teleport was installed via tarball + # all other packages will deploy it automatically + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + install_systemd_unit + fi + start_teleport_systemd + print_welcome_message +# install launchd config on MacOS hosts +elif is_macos_host; then + log "Host is running MacOS" + install_launchd_config + start_teleport_launchd + print_welcome_message +# not a MacOS host and no systemd available, print a warning +# and temporarily start Teleport in the foreground +else + log "Host does not appear to be using systemd" + no_systemd_warning + start_teleport_foreground +fi + diff --git a/ansible/00_old/installer.sh b/ansible/00_old/installer.sh new file mode 100644 index 0000000..832de0c --- /dev/null +++ b/ansible/00_old/installer.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +apt_trusted_d_keyring="/etc/apt/trusted.gpg.d/datasaker-archive-keyring.gpg" +apt_usr_share_keyring="/usr/share/keyrings/datasaker-archive-keyring.gpg" + +# init keyring +if ! 
[ -f ${apt_usr_share_keyring} ]; then + echo "create archive-keyring.gpg" + sudo touch ${apt_usr_share_keyring} + sudo chmod a+r ${apt_usr_share_keyring} +fi + +# download keyring then add key to keyring +curl -fsSL -o /tmp/datasaker.gpg.key https://dsk-agent-s3.s3.ap-northeast-2.amazonaws.com/dsk-agent-s3/public/public.gpg.key +cat /tmp/datasaker.gpg.key | sudo gpg --import --batch --no-default-keyring --keyring "${apt_usr_share_keyring}" + +# copy keyring to trusted keyring +if ! [ -f ${apt_trusted_d_keyring} ]; then + sudo cp -a ${apt_usr_share_keyring} ${apt_trusted_d_keyring} +fi + +# add apt source list +if ! [ -f /etc/apt/sources.list.d/datasaker.list ]; then + echo "deb [signed-by=${apt_usr_share_keyring}] https://nexus.exem-oss.org/repository/debian-repos/ ubuntu main" | sudo tee /etc/apt/sources.list.d/datasaker.list > /dev/null +fi + +sudo apt update +sudo apt install $1 diff --git a/ansible/00_old/key_test.sh b/ansible/00_old/key_test.sh new file mode 100755 index 0000000..7865f0b --- /dev/null +++ b/ansible/00_old/key_test.sh @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +spawn ssh-copy-id root@$argv +expect "password:" +send "saasadmin1234\n" +expect eof diff --git a/ansible/00_old/local_datasaker.yml b/ansible/00_old/local_datasaker.yml new file mode 100644 index 0000000..1fc2b3c --- /dev/null +++ b/ansible/00_old/local_datasaker.yml @@ -0,0 +1,53 @@ +--- +- hosts: local + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "XQOt9G3oAtsOQyd3U25JwOu3/sE+zj/m3kRKL/d0OUAQn30jVlrBKN/gJp9cJ4C9CHU1D1vSEPRxaCk8NuwZh6+v48TiaingDy6F74YGySRvnH0gqdmfxLSGisD/g8/JqBlIwhhyMSVCVfAkcNysLnwLi4xLnZMlvVi2Lzo3MiekSfJS5adR3hAv6pCaCFe2rNW24pYS5PBYkP/kxp/cfYAN/UhVEs5J+h4/iQ5vozQgvWuskBpOjlUeEYZnMZ6Al91gAUmSRoxtzLc+QOdfp7+uDXpwXPm80bQz9bR20Elcr4+rNqLcc2ONwJwrSveDSvJn4xjB6n95hEYbaDHUpA==" + datasaker_agents: ["dsk-node-agent","dsk-log-agent"] + #datasaker_api_key: 
"eO58wEYK/2HThAV+5jgv7Or/qW3zJknBQF0FJt5Xo4kSZ9YH2/CJgfNUwKbGwlbzmihG9dVsSmmS40szOuvRVZJO0vPga98sJNI32AJdWaYX8oCNFouI0lYG+r9Y4vahrS7+FVwntyfkjETotqBDvoQ5HjGjvW0wviPagW/alNbI5pvpWwBHtgz9D83Y8DSvCvO64G4xhyIYZPSML11EqWUO8prYT8LfdD4n2oBp0QJ3cXKdvJAUc4w5LKbTASb8x8UTpVU3JH3Wnwe79PKftJ8YdxOtb5jjzXeOEEM2GD8xz4pbB7scCx5oJCWQLF1js6a2uFLENBgW+ztHRf1j2Q==" + #datasaker_api_key: "1VL7/mhddWkQaS/vf/VjjwjnwaUhtZnLL++ih9LxYSB7HVkPpZw1Duy/4gxLN/73Vga00bD79mVd6N4dP0BVxmGqLnR6xItnSLlO3M6LmOMuM8bLaBuxxOvxST3lxpvtI0B2ilyjqTLh5y+NJWFV7Awq4zpqnPnTZ5dryp3yc4zc3C7Vxu0f2CL7/oGT0LRj/1l7gchuUxw2TVDLFFRylb+cFt6/NNylBxIb1wKGILd7N6NGgnsdRcrv4ZvTEPusrDqxO3IRYF6z9ZNbkQ1BPeDINtVFTgwhqFZjxg6erd8oqscB9n1DHOi6+tJ8VSHi2w5hYxHq93EV4cxBfzXAug==" + #datasaker_agents: ["dsk-node-agent","dsk-log-agent"] + datasaker_docker_agents: ["dsk-docker-node-agent","dsk-docker-log-agent"] + #datasaker_docker_agents: ["dsk-docker-log-agent"] + #postgres_user_name: jhjung + #postgres_user_password: 1q2w3e4r + #postgres_database_address: 0.0.0.0 + #postgres_database_port: 5432 + #plan_postgres_user_name: jhjung + #plan_postgres_user_password: 1q2w3e4r + #plan_postgres_database_address: 0.0.0.0 + #plan_postgres_database_port: 5432 + #plan_postgres_database_name: test + datagate_trace_url: 10.10.43.111 + datagate_trace_port: 31300 + datagate_trace_timeout: 5s + + datagate_manifest_url: 10.10.43.111 + datagate_manifest_port: 31301 + datagate_manifest_timeout: 5s + + datagate_metric_url: 10.10.43.111 + datagate_metric_port: 31302 + datagate_metric_timeout: 5s + + datagate_plan_url: 10.10.43.111 + datagate_plan_port: 31303 + datagate_plan_timeout: 5s + + datagate_loggate_url: 10.10.43.111 + datagate_loggate_port: 31304 + datagate_loggate_timeout: 5s + + datasaker_api_url: 10.10.43.111:31501 + datasaker_api_send_interval: 1m + #uninstall: True + #datasaker_clean: True + app_name: test + custom_log_volume: + - /var/lib/docker + - /var/docker + logs: + - collect: + type: driver diff --git a/ansible/00_old/node_role.yaml 
b/ansible/00_old/node_role.yaml new file mode 100644 index 0000000..3d7f4e4 --- /dev/null +++ b/ansible/00_old/node_role.yaml @@ -0,0 +1,9 @@ +--- +- hosts: agent + become: true + roles: + - teleport + vars: + # remove: True + # custom_labels: 'user=havelight,company=exem' + update: True \ No newline at end of file diff --git a/ansible/00_old/roles.yaml b/ansible/00_old/roles.yaml new file mode 100755 index 0000000..d4b982c --- /dev/null +++ b/ansible/00_old/roles.yaml @@ -0,0 +1,27 @@ +--- +- hosts: test + # become: true + # gather_facts: true + roles: + - role: datasaker + vars: + - datasaker_api_key: yCWIqbipuRMULli6qs4vWs8GfV9rQo8gciSKPvAozxiy05HcPru9LChyNQMtVk0xlmz7UqTj/s6682tiHa9wir/1hOxlLDYipWHPgHXZ1WEJDVvXD/z5Pw8G6IMcNwmgXwXfRZuRvWsSlHva28opykqE/oDHMcwnsABYljd+/VG8UEik08rRpI1t48We0HceZSuJ0aO+9FvoCcjPSHjrj17KCX1beS0UO3iHrRkQOFKOFfHK/fZ3G27YoZgs8ySH+90kLUP65AoAne5TFgXRVJJUCZgr5o2ajEyTi4bkwdt7v1X6/3fIO9kkElfQPZoCQ1u5S9eJfIkkmTEpWSLtuQ== + - datasaker_agents: ['dsk-node-agent','dsk-trace-agent','dsk-log-agent','dsk-plan-postgres-agent'] + # vars: + # - datagate_trace_url: test + # - datagate_trace_port: test + # - datagate_trace_timeout: test + # - datagate_manifes_url: test + # - datagate_manifest_port: test + # - datagate_manifest_timeout: test + # - datagate_metric_url: test + # - datagate_metric_port: test + # - datagate_metric_timeout: test + # - datagate_plan_url: test + # - datagate_plan_port: test + # - datagate_plan_timeout: test + # - datagate_loggate_url: test + # - datagate_loggate_port: test + # - datagate_loggate_timeout: test + # - datasaker_api_url: test + # - datasaker_api_send_interval: test diff --git a/ansible/00_old/test.yml b/ansible/00_old/test.yml new file mode 100644 index 0000000..ee3aef2 --- /dev/null +++ b/ansible/00_old/test.yml @@ -0,0 +1,18 @@ +--- +- hosts: all + vars: + test_check: {} + tasks: + - name: test + set_fact: + test_check: "{{ test_check | default({}, true) }}" + + - name: mapping + assert: + that: + - test_check is 
mapping + + - name: test + debug: + msg: + - "{{ test_check }}" diff --git a/ansible/00_old/vault_test.yaml b/ansible/00_old/vault_test.yaml new file mode 100644 index 0000000..720cbac --- /dev/null +++ b/ansible/00_old/vault_test.yaml @@ -0,0 +1,10 @@ +--- +- hosts: all + tasks: + - name: Get password from Vault + ansible.builtin.debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hostname2 token=hvs.CAESIOy5Troiesm65BQYj_QhF996yilil8whnWP5FWHp3eE8Gh4KHGh2cy40OTBCT09SdTl1c3FRNmFXenFBUmxVSkE url=http://vault.vault:8200') }}" + register: vault_result + + - name: Use password from Vault + ansible.builtin.command: echo "{{ vault_result.msg }}" diff --git a/ansible/01_old/README.md b/ansible/01_old/README.md new file mode 100644 index 0000000..0af0878 --- /dev/null +++ b/ansible/01_old/README.md @@ -0,0 +1 @@ +# ansible_script diff --git a/ansible/01_old/all_host b/ansible/01_old/all_host new file mode 100755 index 0000000..b355a2b --- /dev/null +++ b/ansible/01_old/all_host @@ -0,0 +1,20 @@ +[release] +10.10.43.100 +10.10.43.101 + +[dsk_dev] +10.10.43.[110:200] + +[cmoa] +10.10.43.[201:253] + +[ubuntu] +13.125.123.49 ansible_user=ubuntu + +[bastion] +10.10.43.43 ansible_port=2222 ansible_user=havelight + +[dev2:children] +release +dsk_dev +cmoa diff --git a/ansible/01_old/ansible.cfg b/ansible/01_old/ansible.cfg new file mode 100755 index 0000000..0ebf722 --- /dev/null +++ b/ansible/01_old/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +inventory = inventory +roles_path = roles +deprecation_warnings = False +display_skipped_hosts = no +ansible_home = . 
+stdout_callback = debug +host_key_checking=False +#private_key_file=/root/.ssh/dev2-iac +#remote_tmp = /tmp/.ansible/tmp diff --git a/ansible/01_old/ansible_kubectl/Dockerfile b/ansible/01_old/ansible_kubectl/Dockerfile new file mode 100644 index 0000000..e0f2af7 --- /dev/null +++ b/ansible/01_old/ansible_kubectl/Dockerfile @@ -0,0 +1,26 @@ +# Start from Python 3.9 base image +FROM nexus2.exem-oss.org/awx-ee:latest + +USER root + +# Update and install dependencies +RUN yum clean all && \ + yum makecache && \ + yum update -y && \ + yum install -y sudo nfs-utils + +# Python package management +RUN pip3 uninstall -y crypto pycrypto && \ + pip3 install pycryptodome hvac xlwt + +# Install kubectl +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \ + chmod +x kubectl && \ + mv kubectl /usr/local/bin/ + +# Copy kubeconfig (Not recommended for production) +COPY kubeconfig /root/.kube/config + +# Keep the container running +#CMD ["bash"] +CMD ["tail", "-f", "/dev/null"] diff --git a/ansible/01_old/ansible_kubectl/kubeconfig b/ansible/01_old/ansible_kubectl/kubeconfig new file mode 100644 index 0000000..7d4bd3e --- /dev/null +++ b/ansible/01_old/ansible_kubectl/kubeconfig @@ -0,0 +1,23 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXlPREF4TURJeU1Wb1hEVE15TVRJeU5UQXhNREl5TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT3drCldCR0hqaHRQQUZCVHNBNWJUMzIrNDl2WW4vbkZkd1h5MW5GNlBEYmUzL0ZMYzJDZGE2OENCSk95eTh2L2VvUW4KWGVleXRua05MTTdxaGFrbCtNUnBucFU2Wnp1NGdkdlJML1VHWDd3dTYwNFMvdkpQMXVNcFhMWEZEYUQzbFI2aQpEVm9jcUFmMGZsNFZDU21ldkJnTHpQOFl1cElrbllhc0FaRzJZbGQ2K1Y2RFNwWTA3aFBjQWNYdXo3c24vS202CjNqb2M0TnA1ZlFBSXVoZjVGdXA0UkE2YXAzWHZ3NllMNWExWFVUNzdjY1I3S0JHcDdtRFl3OVN1RloyVHFuOXIKSDUrQll0YytXdjlHNUZLOHcxVFAzYUFGYTJDMnhQYy9FWGxVa1UxOUsveE1ocDdiTm12eWlRd3VBbVI0NjJSKwo4aXBYNXFlam5hTnhITVVoeU5jQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNajA1VFJsa0w5SlZlNUdTS05UOThlbzVqOE1NQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTlA4aWFIZWh4a3RkTStSc3FJMwplU1VoUEFkdW5TSm5MdTJLRFZCYUgxeHVLQkNBbUUzNjBFbk1FUmFLQjNKWUVuVE9aeWR1Y3Z1amg0bVdVaGQrCjA2RVl0aDRYVW1sSytsVTJkWHdia2s4UGRrZnVrRE5ZOG8vV20vc05oSlF4VTlFMHhZSGwwSDgwVlQ1Mk5CR1oKSkRnUDREaUVibzluajBhaVJkaDFYMmROZkh5Vzl0VGZPM210OGVtUldSVVV1Ly91anNsMTJ1VjZNRjFTVmlRLwpCaU1iODMvVit5aHVWa09HVDk5c25BMTNCTkF1WVFDeUpaK3ZRdm5jdTdCbW5sMXdtRjhVTVNjYTFUMEZiOVR1CjFkNnVqWTM2U1pjbXlHSHU5SW1lb2FoYkdQMmNFNkI0YnlrNDA5UGFwbnl3dGRUNC9hREU5aGlQVGZMNkc0ZXEKMmRJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://10.10.43.240:6443 + name: saas-mgmt +contexts: +- context: + cluster: saas-mgmt + user: saas-mgmt + name: saas-mgmt +current-context: saas-mgmt +kind: Config +preferences: {} +users: +- name: k8s-prod.datasaker.io + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lSQU1HZExUZ0psRzFLVXpoR1kzT2RIMVV3RFFZSktvWklodmNOQVFFTEJRQXcKR0RFV01CUUdBMVVFQXhNTmEzVmlaWEp1WlhSbGN5MWpZVEFlRncweU16QTRNak13TmpJME16ZGFGdzB5TXpBNApNall3TURJME16ZGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3hyCmRXSmxZMlpuTFhKdmIzUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEVEFrVXEKeUp6N1ZYak5uY0NRUy9qK29YdTZNWUtGSUxDYjNvUFVDelhSbzZsL3VXQy9hL1R2SmxHWmNJNFRzQURKdGdOYwovZEM4YlJGdUNaVmEwNGljMWY2U2VoSVZhVGYwbHh6SHZOQjcwSUQ0ajB1Z2JVeCtDS0pXNkF3Wk9jU0t4Yk5yCi9VNThZeFVjVVIxa2NhYTNmeU8zTmJNNGpENkE2TVQySFZCS3p4Wk4vek5HdjZCUy9RYjROVVFjMFprUDUvb0cKc1ptL3dtWVJZb1dyTld2NXVzWVFnS25HMUNyTVdNMFgySGtDWkF1cXpHUU9CMWpvK2RZWVQvWnhab2lIZEx3ZgpaWjIwb2paOHQ1MnBMZ1VlbTJsQ2xraXJHdEFKRzI2MGluVDNLMTBBakdSMEE5ZVlJNzd5VEQrUHZ2c2tTUHVkCnIxb1FqRDRWYUZIMG1IWTFBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJSGdEQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkVyeVN4QzRVNlNWTUtmVQpaWFBqNnlhSE9iSDlNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUURES3BreklUeFlEUFBjK1pabkJaZjdOV2wvCjVTRmRRVU9IRmZ6K1ZSMjNOdnBra0VQaVcrWXNiU2dFQUcvbldVK3gvVFJqN3JORDg2cGlmODNJYXN6WjlpV3gKK1ZYRE9rekQrcG5qWXlVa2t6WW8vM1dZQklRaUdlNTRBNjlUY2VEYjV0a3J6RkFxd3JhUXI4VFJ6VVVaNzVDVQp6dmFqMkRZcUhZN1dkRlJTZUhqcm9EVHB2d1BXSjU2YjI1d0NndGdHV29uM2ZBWERjV1Z2ZzJsUnZMOHI0ZmpCCkNSRSswNjdwSGkvc2VpNU1QMFI5WkZGbjRrbm1peHE0TW1tcjd6UGZ6ZFhwS1dDOHRZKzZOemszMzdnZFhZNnYKbjVNOW1VYjk5ZngwL0J5ZGNQeGVqeXVZV3I2Y01DKzBQOE9uU1BSUjM1MS8xYVFaQXJLWnU3TmYySFVUCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMHdKRktzaWMrMVY0elozQWtFdjQvcUY3dWpHQ2hTQ3dtOTZEMUFzMTBhT3BmN2xnCnYydjA3eVpSbVhDT0U3QUF5YllEWFAzUXZHMFJiZ21WV3RPSW5OWCtrbm9TRldrMzlKY2N4N3pRZTlDQStJOUwKb0cxTWZnaWlWdWdNR1RuRWlzV3phLzFPZkdNVkhGRWRaSEdtdDM4anR6V3pPSXcrZ09qRTloMVFTczhXVGY4egpScitnVXYwRytEVkVITkdaRCtmNkJyR1p2OEptRVdLRnF6VnIrYnJHRUlDcHh0UXF6RmpORjloNUFtUUxxc3hrCkRnZFk2UG5XR0UvMmNXYUloM1M4SDJXZHRLSTJmTGVkcVM0RkhwdHBRcFpJcXhyUUNSdHV0SXAwOXl0ZEFJeGsKZEFQWG1DTys4a3cvajc3N0pFajduYTlhRUl3K0ZXaFI5SmgyTlFJREFRQUJBb0lCQVFERnp1SUNhcEJuT01nSAprWFFja1d2NVlHNjVySUlieFBwckZGem00aDl3eUlrME9CZFBPNmdnclA1ZjVsajZjY3M3VFFxNEdTU2VEMjBBCmg3Rmd0TjdqaitTWGNpSVR1bEIvVlUzZ25NdWcxbVNoSHN3WnQzeTJ4ZWRScXpUMFRPaEg0M0FBc3pUcGZJVWsKeDVIVFFJdTJoMVIzQXJ0aExtL0ZydkE5ZkZ0eDFCM3d3TUtEdmtObDN2bU82TnMxY3J3cjJmOUw1TTNJUVJXQwpRbVNFOGFSUkk4Rnhob2FKb3JRY3U0VFpocDBGUzYrVkR3bG9OWkRGZ0M4Uk1YSWd0ZXZkVnpMdGxQUUVSUTA3CmhkdFZMcklGYktQNGtRaVk1emlUYlhKOXdFSFVLNWF6ekRVNnVNU3RZc3ZveVVpR3A4REozeWFPM0RwNS93MkYKaTFRcE1oRWRBb0dCQU5tT2NWL0tVODdiQ1IydnN2eU1DOVVZNS8wdWpZZU1ndk00dC9OWXlYUmRwVFYrSXk5aApBbGNmZHVnZUN4YlpPV2IzZVIwdE9uMTNGNlNYbnZodkRaZEZzT0w5SU0rd05ZZVJoUGFyZmVQb2JDQVVLZmVJCitTazllVUovNDVlckhkUWhyMlY1aXdwQXhIUXBza0ZWd3U1OHFQbzdLalFMTG1MeDFISkhMZDFIQW9HQkFQaEwKb3hqTXdFSmZudkZjLy9YUGJRSXgwWndiVUltQWFSYlIwUDIwZU1FWFhuNjZZQ1VIeDl4YVpIVHJqY2N1eXdiVgo1QjJHYThHMzVDN0oydldvZWVaamU2c2xJTE5UMm1jNG4wSWFRSFR1a2ZPWHpKQUhRSEZXQWI2TnZJTlpaK3hGCkJ1YndwWHRYUDlvVFh2MG41MEpTUzgyOW1LWG55c25RbDBNNzk5NmpBb0dBVHE1YmhuOVFMQ0cvZkVNTkFwVkwKdWpnVnZ0VlZUazZkRllYUDBXeXMveTdYRHkrZFhnZEJwMnl6dm1NUE02WkFRbU1DSkhFMUZDYzhIOFRPTTU5RwpWUTFaV2Q2ZVBUN0hQVTU5dmhCcnFUOW55M28vYTB6WWYvZkJvVEZMaUpEVWF1SDc0MEUvN2VkYXBZQm0vWVljCng4L0I5UzNzcDRIYnR1RXJLbUZmendVQ2dZRUE0SDdLMVdaelFzL2dEczBPWkxzS0RaenJyMkNHL2Z2TGlLVm0KZDYxUUxRMnJFNXdCeUJsejNFa2lZUkNGWFIxeTFoaFlLMVhaWWdxWlZyQ050K1YvYWc1eXgzaEhTN3k2VU8vQwpGdXRUY2lZdWNuZkNya3JRT21rUUpMRlVTOUp2Z3hHYVB2NUFNUGZmTkphbElQR09SOG5PM2hQWnk4OTY2K1FjCmo5N05xMDhDZ1lCSWFXb3ZaVDMrOE5hR1hSQTRBaU1RU0MweFNC
YlhacitCSHZ4TVlUQ1loMlJKZTVTM0FzNGUKc2Z0d0I2c0MvYXI2bkppOFVpN05VdysrTUk0VUZORE05c0JWUXp3NUUwNlFCSE1TcGxQdXRCRGVXTE5RN0FDNwpkM2Q5bHE4RjhXU3o0c0JiVlZvUjdheUJpU1F2blU1Q3NOSXFBb2ExYUllTzNhdVcyQWd0OEE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +- name: saas-mgmt + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJSFVDZ2pMMXhPRHd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFeU1qZ3dNVEF5TWpGYUZ3MHlOREF4TURNd016QXhNalJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXBGWVNKT1YrM3QrbWVOQ2sKUnNMdWVyMmFSbm5qRHBvYTlBTkh5TWZ4WWRmYTNRTS9VZGlPUlEraWFTMU1jZjFPUFVzRlloOU1iS2p6S0tCTApGYTh2NkJ0dGN2bVIwTFRLcTY2MHFIUWZOMVUxUEhTUnhROElQTzg5bUhjYkI1Z2N3MktRRWNIWVNsaURBVHM3CllkK2JqMWtabWIrUVIxQlhlZlVoZ0pvYk1UazNrdERBMG1jRlVHSExKakxjUTF2UlNiR2s0NVJRY2d5SlBjdy8KM0VPTXkzcHRzeUpldk5tb3c2N0pmVXp3d1NBM0hOd0paTzB0enVETGprRTNMOXgwVC9idGVXTlI0eUEvTFp2QQppRGkrdmVnMFBJL2FYbHBvejVOVjNwdVQwOGJnRVVWZ0dUdWhRQWtxZkk1SmpjYXBLSXpLRzBMVWdxWXRjQWJRClE3ZDcvd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUSTlPVTBaWkMvU1ZYdVJraWpVL2ZIcU9ZLwpEREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBNGluVHEzU1krcXpTczlGYWwrMXdRNlNWSVJXOEw5anB6NGdsCnNCZGpZV05DbzRzTURTNG95bEh5ZTVhMXZZR3ZqdXBORjUyVG5sL2UzU3U0ekxWTHZNMXd4VHR3aUVqNjlOTGcKR2RwemtuUk5INTM0UHFhaXdxNk9xOWd5MXRIQmIyTHNadlFRN0FBanFKVG5SL01NNVI4cEllSnpFdk5xSHVMdAovWndXRWg2enJXVkVON3IxWVN6elpCRVgxaGh5dE5abUltTkhQNVFPcmxueXZLc1luV0hLL0JTTHNHK2dQc0htCkJzQkhtblNBeTh2Wk5OSWJZQk5SMmJ6WkZiVWExaTZ4aHlxRUNvdEpXMHVvOWlSN0tpbGFnelNJN1hNdUdhZFkKYkFvcHVZWDJTTkROMnhiZWJkOWhxRkRRcHZwUTlXSE00ZTBpNjhjOUgvOTA2MS9hQnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcEZZU0pPViszdCttZU5Da1JzTHVlcjJhUm5uakRwb2E5QU5IeU1meFlkZmEzUU0vClVkaU9SUStpYVMxTWNmMU9QVXNGWWg5TWJLanpLS0JMRmE4djZCdHRjdm1SMExUS3E2NjBxSFFmTjFVMVBIU1IKeFE4SVBPODltSGNiQjVnY3cyS1FFY0hZU2xpREFUczdZZCtiajFrWm1iK1FSMUJYZWZVaGdKb2JNVGsza3REQQowbWNGVUdITEpqTGNRMXZSU2JHazQ1UlFjZ3lKUGN3LzNFT015M3B0c3lKZXZObW93NjdKZlV6d3dTQTNITndKClpPMHR6dURMamtFM0w5eDBUL2J0ZVdOUjR5QS9MWnZBaURpK3ZlZzBQSS9hWGxwb3o1TlYzcHVUMDhiZ0VVVmcKR1R1aFFBa3FmSTVKamNhcEtJektHMExVZ3FZdGNBYlFRN2Q3L3dJREFRQUJBb0lCQUJpd2JhMXBaVFFxdWFIeApCcDB5OEEwMHF4Ym5mUHRXbjdJRlJDV2dGRjIweGticUUvdEI0NjN3ZVYvLzFEcFQ2Z3MvV0NHenZoR2RHRnNFCktnT3AvREtNM0ZhbnRBWjlBdTNrSTNRamJnVXNJZ0ZoS2YxSEV0L0V1YVpNVHAxSGR4ckxsZ1YwNy8vTGFITW8KNlBUOVdTdWlJVHgrRVRrRmt2N1pteHp0Q2lUTXpCQTJ4YW9paWk4dEs1NkM4K1JSeW5wMVlhSnl0VkQra3diZwpwZmErVThlUFEwRXUyeFkzVVRkdHZLSDYrSVVHdUppMVI3Nk1qM3lZRFl3NkJmN2lzMU1tSXhKM3FpdjVFRFhZCjdDSXR2VmxyQTBWczJrREpHVXdYZGhQdHU2RmUzWXc1eFNaRnR2cXkyVnpXcEhvcVB0RG16cy85ZXpnbTExeUUKWDNmR00yRUNnWUVBd0VZSGd2cmxWUXU3RW44RUlQZFVsMnZOQnBueWxZNzFWTnY5Q0dpLzNyKzhQcFNKenBiRQp0azVoTTNmeHJ4Q0s3SGI4NXBKSDl2dzh2akFwb1VmeDVIYnJESkpkS1F4Rlp5RStSRnpHVG1KN0M2VERQanVaCjl1SVhFRkxQd3RndFJqZ2hhUk1MSm16SGlJVkQ0cmpjZHhRTlN3ZWc3ZEhOOFQ5UVVHajhiUmtDZ1lFQTJzMmYKVlFLMnhlaHJLT0VMYVo5a3pvZlYyUUEvN0RFSzdvOHBuOGZhSk1SRVJlSmdTb3MyZ0p2cU5mdVhNMVp1K2crLwpuSno0L0kxSW9aQUdLTllxZHlhLzVrdDRjTkZjMTFrK3MzaE91Z01lbnBKSXNUU2EyS1o4RHl0Sm5pWTZUOFZMClBlWnlSNy9xb2p2dWUyY3NvSTFPUWxHcGFPa09WQ0NaZTc0V1BOY0NnWUVBdHc3MWYrTFlEVXlKNDJaQ3pCQXUKM2F1cEhDdmVKajVoblZXRlowZ3p4U1BQV2RXYURyTEV2cjJKRmJPUXl4aDQ3QUd0YnVpKzA0djdXU2dKdXFBQQowWC9XOGJVNE5TaVZ1MGFQUGc4R1R3SzhHNjNXcFoyaFRNaWRKTkZ6TlJNVXA5SXhIUlVnZklqOHdDSUJMQTdNCitDS0ROWGdoNDhyb3hGTi9aODlNNWFFQ2dZQkFsZXVQTzJMYUhsWHJWaXA1UGd5U2pqUUlmdk5mYzhhSFRvajUKMmhOQlFSSHFFdjFiWTZadDVoZ0hZVUZyYlBzTEl6VHJOTWFtUGNvUHJxU3l6eXp2eU9kaVFpckdHbmF1Tm5DMApwekdONUxmWUZOUVNRclhtZDVZdElCajE3dERObFM0MWtsMXZZbTRPLzJQUTEwNnNBYW4xRjRmTEtPZ0syeWlUCkJ6UW5Od0tCZ1FDWVVZWEtNZm1vSEhGQkJuVjcxY2xQV1BRUC94
bmY5SGdnalJJK2dvSDNkRzRxOXdYS2FpaHYKYndIRmNJQXFPYWkzTm1HT1dydzA4bkNENFYyMVZRQWd0ZFdjMFVmS254M2svdFFheHUyM00xaWdkV2JQbXcyUwp4em1TY0sweHVjdkdvQ0VaUWZ0cGlQOVFwUWJIQ0xGNUFIM2tRdExRZDdzOTErZkU2cGtsYlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/ansible/01_old/ansible_kubectl/testpod.yaml b/ansible/01_old/ansible_kubectl/testpod.yaml new file mode 100644 index 0000000..076a76e --- /dev/null +++ b/ansible/01_old/ansible_kubectl/testpod.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: awx-test + namespace: ansible-awx +spec: + containers: + - image: nexus2.exem-oss.org/awx-ee:latest + imagePullPolicy: Always + name: awx-test + resources: {} + securityContext: + privileged: true diff --git a/ansible/01_old/infra-settings.yml b/ansible/01_old/infra-settings.yml new file mode 100644 index 0000000..b2a4dd3 --- /dev/null +++ b/ansible/01_old/infra-settings.yml @@ -0,0 +1,19 @@ +--- +- hosts: all + become: true + roles: + - connect-settings + # - teleport + vars: + username: dev2 + adminuser: root + manual_password: saasadmin1234 + sshmainport: 2222 + #encrypt: 1 + #debug_mode: True + teleport_uri: teleport.kr.datasaker.io + # remove: True + # custom_labels: 'user=havelight,company=exem' + update: True + # install: True + diff --git a/ansible/01_old/infra-test b/ansible/01_old/infra-test new file mode 100644 index 0000000..817b798 --- /dev/null +++ b/ansible/01_old/infra-test @@ -0,0 +1,19 @@ +[redhat] +10.10.43.177 ansible_port=2222 ansible_user=dev2 +10.10.43.178 ansible_port=2222 ansible_user=dev2 +10.10.43.179 ansible_port=2222 ansible_user=dev2 + +[ubuntu] +#10.10.43.234 ansible_port=22 ansible_user=root +10.10.43.180 ansible_port=22 ansible_user=ubuntu + +[tmp] +10.10.43.147 ansible_port=2222 ansible_user=ubuntu + +[proxy] +10.10.43.20 + +[agent:children] +redhat +ubuntu + diff --git a/ansible/01_old/inventory/.DS_Store b/ansible/01_old/inventory/.DS_Store new file mode 100644 index 
0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0lGKLxdl_aGu+r_BB~vojm!M|QSDM65rKwuo9p#Gwiss~DaTwXSqd7OZ6%$n+j1 zNg7c^1NuAX?F^0q$H04IK%L!>G^BuX{O+IMGuSdZr8q3}VVKGgo;TqzP4c4Cxn&EB zjiu!kzQWh|v418@zX*zP-Va8<)zhJrX*in);ZYKogVyJtGAn{4i$^*jiDJ0?`ITgm zEcr-o=AgAPnRGkdw&?YCrfo6V>TdVi;>XVS>9oneeBInV7@VgUnY=Q* zGL#`LA1uz`8j}~&y!1y|Dzj5~s$D*Tju>TZjDnU>LI3ERwiK8rnwY#U#QaC9kI3O5 zA*K{M)D^#|3l(#TJxYA`J=Fs3Vc~1F$i^k%7;p@{CIhNI2&h8OVr5X54h*^l05)LO zg1PD?RE)6bS*#3V1;R`y(1bGEVlWd9yRd$q#mb-wCuW-uX5Y+ghr;yRvA@vn#5{v; zbPPBK9x||K9$V`E-+#XUe;DMR90QJlS~0*Hd;VS@DcQSqE;)7A3g`(`iSnxq>JSX} iI@S|)6|Y0JU`?nA(6d+>gazXM2xuDI;23yQ2JQf8*x3mH literal 0 HcmV?d00001 diff --git a/ansible/01_old/roles/agent_os_setting/README.md b/ansible/01_old/roles/agent_os_setting/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/01_old/roles/agent_os_setting/defaults/main.yml b/ansible/01_old/roles/agent_os_setting/defaults/main.yml new file mode 100644 index 0000000..23b5c58 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/defaults/main.yml @@ -0,0 +1,140 @@ +helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f +helm_version: v3.10.2 + +kubernetes_version: 1.23.16 + +kubernetes_kubelet_extra_args: "" +kubernetes_kubeadm_init_extra_opts: "" +kubernetes_join_command_extra_opts: "" + +kubernetes_pod_network: + cni: 'calico' + cidr: '10.96.0.0/12' + +kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml + +kubernetes_metric_server_file: https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + +containerd_config: + version: 2 + root: /var/lib/containerd + state: /run/containerd + plugin_dir: "" + disabled_plugins: [] + required_plugins: [] + oom_score: 0 + grpc: + address: /run/containerd/containerd.sock + tcp_address: "" + tcp_tls_cert: "" + tcp_tls_key: "" + uid: 0 + gid: 0 + max_recv_message_size: 16777216 + max_send_message_size: 16777216 + ttrpc: + address: "" + uid: 0 + gid: 0 + debug: + address: "" + uid: 0 + gid: 0 + level: "" + metrics: + address: "" + grpc_histogram: false + cgroup: + path: "" + timeouts: + 
"io.containerd.timeout.shim.cleanup": 5s + "io.containerd.timeout.shim.load": 5s + "io.containerd.timeout.shim.shutdown": 3s + "io.containerd.timeout.task.state": 2s + plugins: + "io.containerd.gc.v1.scheduler": + pause_threshold: 0.02 + deletion_threshold: 0 + mutation_threshold: 100 + schedule_delay: 0s + startup_delay: 100ms + "io.containerd.grpc.v1.cri": + disable_tcp_service: true + stream_server_address: 127.0.0.1 + stream_server_port: "0" + stream_idle_timeout: 4h0m0s + enable_selinux: false + sandbox_image: k8s.gcr.io/pause:3.1 + stats_collect_period: 10 + systemd_cgroup: false + enable_tls_streaming: false + max_container_log_line_size: 16384 + disable_cgroup: false + disable_apparmor: false + restrict_oom_score_adj: false + max_concurrent_downloads: 3 + disable_proc_mount: false + containerd: + snapshotter: overlayfs + default_runtime_name: runc + no_pivot: false + default_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + untrusted_workload_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + runtimes: + runc: + runtime_type: io.containerd.runc.v1 + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + cni: + bin_dir: /opt/cni/bin + conf_dir: /etc/cni/net.d + max_conf_num: 1 + conf_template: "" + registry: + configs: + "10.10.31.243:5000": + tls: + insecure_skip_verify: true + mirrors: + "docker.io": + endpoint: + - https://registry-1.docker.io + "10.10.31.243:5000": + endpoint: + - http://10.10.31.243:5000 + x509_key_pair_streaming: + tls_cert_file: "" + tls_key_file: "" + "io.containerd.internal.v1.opt": + path: /opt/containerd + "io.containerd.internal.v1.restart": + interval: 10s + "io.containerd.metadata.v1.bolt": + content_sharing_policy: shared + "io.containerd.monitor.v1.cgroups": + no_prometheus: false + "io.containerd.runtime.v1.linux": + shim: containerd-shim + runtime: runc + runtime_root: "" + 
no_shim: false + shim_debug: false + "io.containerd.runtime.v2.task": + platforms: + - linux/amd64 + "io.containerd.service.v1.diff-service": + default: + - walking + "io.containerd.snapshotter.v1.devmapper": + root_path: "" + pool_name: "" + base_image_size: "" diff --git a/ansible/01_old/roles/agent_os_setting/files/get-docker.sh b/ansible/01_old/roles/agent_os_setting/files/get-docker.sh new file mode 100755 index 0000000..e8586ff --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/get-docker.sh @@ -0,0 +1,645 @@ +#!/bin/sh +set -e +# Docker CE for Linux installation script +# +# See https://docs.docker.com/engine/install/ for the installation steps. +# +# This script is meant for quick & easy install via: +# $ curl -fsSL https://get.docker.com -o get-docker.sh +# $ sh get-docker.sh +# +# For test builds (ie. release candidates): +# $ curl -fsSL https://test.docker.com -o test-docker.sh +# $ sh test-docker.sh +# +# NOTE: Make sure to verify the contents of the script +# you downloaded matches the contents of install.sh +# located at https://github.com/docker/docker-install +# before executing. 
+# +# Git commit from https://github.com/docker/docker-install when +# the script was uploaded (Should only be modified by upload job): +SCRIPT_COMMIT_SHA="66474034547a96caa0a25be56051ff8b726a1b28" + +# strip "v" prefix if present +VERSION="${VERSION#v}" + +# The channel to install from: +# * nightly +# * test +# * stable +# * edge (deprecated) +DEFAULT_CHANNEL_VALUE="stable" +if [ -z "$CHANNEL" ]; then + CHANNEL=$DEFAULT_CHANNEL_VALUE +fi + +DEFAULT_DOWNLOAD_URL="https://download.docker.com" +if [ -z "$DOWNLOAD_URL" ]; then + DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL +fi + +DEFAULT_REPO_FILE="docker-ce.repo" +if [ -z "$REPO_FILE" ]; then + REPO_FILE="$DEFAULT_REPO_FILE" +fi + +mirror='' +DRY_RUN=${DRY_RUN:-} +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + --dry-run) + DRY_RUN=1 + ;; + --*) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + Aliyun) + DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce" + ;; + AzureChinaCloud) + DOWNLOAD_URL="https://mirror.azure.cn/docker-ce" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +# version_gte checks if the version specified in $VERSION is at least +# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either +# unset (=latest) or newer or equal than the specified version. Returns 1 (fail) +# otherwise. +# +# examples: +# +# VERSION=20.10 +# version_gte 20.10 // 0 (success) +# version_gte 19.03 // 0 (success) +# version_gte 21.10 // 1 (fail) +version_gte() { + if [ -z "$VERSION" ]; then + return 0 + fi + eval calver_compare "$VERSION" "$1" +} + +# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success) +# if version A is newer or equal than version B, or 1 (fail) otherwise. 
Patch +# releases and pre-release (-alpha/-beta) are not taken into account +# +# examples: +# +# calver_compare 20.10 19.03 // 0 (success) +# calver_compare 20.10 20.10 // 0 (success) +# calver_compare 19.03 20.10 // 1 (fail) +calver_compare() ( + set +x + + yy_a="$(echo "$1" | cut -d'.' -f1)" + yy_b="$(echo "$2" | cut -d'.' -f1)" + if [ "$yy_a" -lt "$yy_b" ]; then + return 1 + fi + if [ "$yy_a" -gt "$yy_b" ]; then + return 0 + fi + mm_a="$(echo "$1" | cut -d'.' -f2)" + mm_b="$(echo "$2" | cut -d'.' -f2)" + if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then + return 1 + fi + + return 0 +) + +is_dry_run() { + if [ -z "$DRY_RUN" ]; then + return 1 + else + return 0 + fi +} + +is_wsl() { + case "$(uname -r)" in + *microsoft* ) true ;; # WSL 2 + *Microsoft* ) true ;; # WSL 1 + * ) false;; + esac +} + +is_darwin() { + case "$(uname -s)" in + *darwin* ) true ;; + *Darwin* ) true ;; + * ) false;; + esac +} + +deprecation_notice() { + distro=$1 + distro_version=$2 + echo + printf "\033[91;1mDEPRECATION WARNING\033[0m\n" + printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version" + echo " No updates or security fixes will be released for this distribution, and users are recommended" + echo " to upgrade to a currently maintained version of $distro." + echo + printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue." + echo + sleep 10 +} + +get_distribution() { + lsb_dist="" + # Every system that we officially support has /etc/os-release + if [ -r /etc/os-release ]; then + lsb_dist="$(. 
/etc/os-release && echo "$ID")" + fi + # Returning an empty string here should be alright since the + # case statements don't act unless you provide an actual value + echo "$lsb_dist" +} + +echo_docker_as_nonroot() { + if is_dry_run; then + return + fi + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + echo + echo "================================================================================" + echo + if version_gte "20.10"; then + echo "To run Docker as a non-privileged user, consider setting up the" + echo "Docker daemon in rootless mode for your user:" + echo + echo " dockerd-rootless-setuptool.sh install" + echo + echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode." + echo + fi + echo + echo "To run the Docker daemon as a fully privileged service, but granting non-root" + echo "users access, refer to https://docs.docker.com/go/daemon-access/" + echo + echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent" + echo " to root access on the host. Refer to the 'Docker daemon attack surface'" + echo " documentation for details: https://docs.docker.com/go/attack-surface/" + echo + echo "================================================================================" + echo +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. 
+ EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + if [ "$lsb_dist" = "osmc" ]; then + # OSMC runs Raspbian + lsb_dist=raspbian + else + # We're Debian and don't even know it! + lsb_dist=debian + fi + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + fi + fi + fi +} + +do_install() { + echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA" + + if command_exists docker; then + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + again to update Docker, you can safely ignore this message. + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. 
+ EOF + exit 1 + fi + fi + + if is_dry_run; then + sh_c="echo" + fi + + # perform some very rudimentary platform detection + lsb_dist=$( get_distribution ) + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + if is_wsl; then + echo + echo "WSL DETECTED: We recommend using Docker Desktop for Windows." + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + ;; + + centos|rhel|sles) + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --release | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + esac + + # Check if this is a forked Linux distro + check_forked + + # Print deprecation warnings for distro versions that recently reached EOL, + # but may still be commonly used (especially LTS versions). 
+ case "$lsb_dist.$dist_version" in + debian.stretch|debian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + raspbian.stretch|raspbian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + ubuntu.xenial|ubuntu.trusty) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + fedora.*) + if [ "$dist_version" -lt 33 ]; then + deprecation_notice "$lsb_dist" "$dist_version" + fi + ;; + esac + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + pre_reqs="apt-transport-https ca-certificates curl" + if ! command -v gpg > /dev/null; then + pre_reqs="$pre_reqs gnupg" + fi + apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL" + ( + if ! is_dry_run; then + set -x + fi + $sh_c 'apt-get update -qq >/dev/null' + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null" + $sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings' + $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg" + $sh_c "chmod a+r /etc/apt/keyrings/docker.gpg" + $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list" + $sh_c 'apt-get update -qq >/dev/null' + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g")" + search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst apt-cache madison results" + echo + exit 1 + fi 
+ if version_gte "18.09"; then + search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + echo "INFO: $search_command" + cli_pkg_version="=$($sh_c "$search_command")" + fi + pkg_version="=$pkg_version" + fi + fi + ( + pkgs="docker-ce${pkg_version%=}" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io" + fi + if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then + # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently) + pkgs="$pkgs docker-scan-plugin" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pkgs >/dev/null" + ) + echo_docker_as_nonroot + exit 0 + ;; + centos|fedora|rhel) + if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then + echo "Packages for RHEL are currently only available for s390x." + exit 1 + fi + if [ "$lsb_dist" = "fedora" ]; then + pkg_manager="dnf" + config_manager="dnf config-manager" + enable_channel_flag="--set-enabled" + disable_channel_flag="--set-disabled" + pre_reqs="dnf-plugins-core" + pkg_suffix="fc$dist_version" + else + pkg_manager="yum" + config_manager="yum-config-manager" + enable_channel_flag="--enable" + disable_channel_flag="--disable" + pre_reqs="yum-utils" + pkg_suffix="el" + fi + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + ( + if ! 
is_dry_run; then + set -x + fi + $sh_c "$pkg_manager install -y -q $pre_reqs" + $sh_c "$config_manager --add-repo $repo_file_url" + + if [ "$CHANNEL" != "stable" ]; then + $sh_c "$config_manager $disable_channel_flag docker-ce-*" + $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL" + fi + $sh_c "$pkg_manager makecache" + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix" + search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst $pkg_manager list results" + echo + exit 1 + fi + if version_gte "18.09"; then + # older versions don't support a cli package + search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)" + fi + # Cut out the epoch and prefix with a '-' + pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + if [ -n "$cli_pkg_version" ]; then + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then + # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently) + pkgs="$pkgs docker-scan-plugin" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs 
docker-buildx-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "$pkg_manager install -y -q $pkgs" + ) + echo_docker_as_nonroot + exit 0 + ;; + sles) + if [ "$(uname -m)" != "s390x" ]; then + echo "Packages for SLES are currently only available for s390x" + exit 1 + fi + if [ "$dist_version" = "15.3" ]; then + sles_version="SLE_15_SP3" + else + sles_minor_version="${dist_version##*.}" + sles_version="15.$sles_minor_version" + fi + opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/$sles_version/security:SELinux.repo" + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + pre_reqs="ca-certificates curl libseccomp2 awk" + ( + if ! is_dry_run; then + set -x + fi + $sh_c "zypper install -y $pre_reqs" + $sh_c "zypper addrepo $repo_file_url" + if ! is_dry_run; then + cat >&2 <<-'EOF' + WARNING!! + openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now. + Do you wish to continue? + You may press Ctrl+C now to abort this script. 
+ EOF + ( set -x; sleep 30 ) + fi + $sh_c "zypper addrepo $opensuse_repo" + $sh_c "zypper --gpg-auto-import-keys refresh" + $sh_c "zypper lr -d" + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")" + search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst zypper list results" + echo + exit 1 + fi + search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'" + # It's okay for cli_pkg_version to be blank, since older versions don't support a cli package + cli_pkg_version="$($sh_c "$search_command")" + pkg_version="-$pkg_version" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + if [ -n "$cli_pkg_version" ]; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if ! 
is_dry_run; then + set -x + fi + $sh_c "zypper -q install -y $pkgs" + ) + echo_docker_as_nonroot + exit 0 + ;; + *) + if [ -z "$lsb_dist" ]; then + if is_darwin; then + echo + echo "ERROR: Unsupported operating system 'macOS'" + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + exit 1 + fi + fi + echo + echo "ERROR: Unsupported distribution '$lsb_dist'" + echo + exit 1 + ;; + esac + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/.helmignore b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/CHANGELOG.md b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/CHANGELOG.md new file mode 100644 index 0000000..27a52e8 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/CHANGELOG.md @@ -0,0 +1,445 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). 
+ +### 4.2.1 + +- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1 +- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + +### 4.2.0 + +- Support for Kubernetes v1.19.0 was removed +- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0" +- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name" +- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1" +- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16" +- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process" +- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix" +- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check" +- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16" +- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0" +- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process" +- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable" +- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15" +- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2" +- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format" +- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API" +- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide" +- 
"[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs" +- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14" +- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0" +- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5" +- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement" +- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver" +- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step" +- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha" +- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path" +- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases" +- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds" +- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps" +- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)" +- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard" +- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0" +- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2" +- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos" +- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it" +- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 
2.3.1 to 3.1.0" +- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0" +- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3" +- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1" + +### 4.1.2 + +- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed" +- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter" +- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart" + +### 4.1.0 + +- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script" +- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6" +- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod" +- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty" +- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector" +- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies" +- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md" +- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing" +- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist" +- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one" +- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement" +- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) 
added new auth-tls-match-cn annotation" +- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0" + +### 4.0.18 + +- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." 
+- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +- 
"[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations" +- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1 +- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6 +- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs +- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors +- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release +- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch +- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart +- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump 
google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation +- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045) +- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues +- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543 +- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name +- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners +- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option +- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags +- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page +- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation +- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations +- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs +- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml +- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide +- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile +- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use 
k8s-staging-test-infra/gcb-docker-gcloud +- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement +- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation. +- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs +- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server +- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog +- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition + +### 4.0.14 + +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. 
+ +### 4.0.10 + +- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0 + +### 4.0.9 + +- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources + +### 4.0.7 + +- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx +- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx +- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode +- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1 + +### 3.32.0 + +- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA + +### 3.31.0 + +- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] 
[#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + +### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. 
+ +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix 
controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 + +### 3.3.1 + +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update 
chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls 
because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts* checking the tag diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/Chart.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/Chart.yaml new file mode 100644 index 0000000..55c0b54 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + artifacthub.io/changes: | + - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + - "fix permissions about configmap" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.3.1 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.20.0-0' +maintainers: +- name: rikatz +- name: strongjz +- name: tao12345666333 +name: ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 4.2.5 diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/OWNERS b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md new file mode 
100644 index 0000000..4e6a696 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md @@ -0,0 +1,494 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square) + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). 
As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. 
"release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller 0.25.* working only with Kubernetes 1.14+, 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +## Requirements + +Kubernetes: `>=1.20.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | +|
controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | | +| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | +| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| 
controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. 
| +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. | +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. 
| +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. | +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.chroot | bool | `false` | | +| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | | +| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"registry.k8s.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.3.1"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. 
Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds 
from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # | +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| 
controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| +| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to. Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watch only the namespaces whose labels match the namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled, adds an appProtocol option for the Kubernetes service.
An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
# Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"registry.k8s.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security 
Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. 
This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param | +| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| podSecurityPolicy.enabled | bool | `false` | | +| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration | +| rbac.create | bool | `true` | | +| rbac.scope | bool | `false` | | +| revisionHistoryLimit | int | `10` | Rollback limit # | +| serviceAccount.annotations | object | `{}` | Annotations for the controller service account | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | +| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | + diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md.gotmpl b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md.gotmpl new file mode 100644 index 0000000..8959961 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/README.md.gotmpl @@ -0,0 +1,235 @@ +{{ template "chart.header" . }} +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . 
}} + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. 
For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. 
+ +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . 
}} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..4393a5b --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,14 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..1d94be2 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,22 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..f299dbf --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ab7d47b --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..0a200a7 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml 
b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..3b7aa2f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..0b55306 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,17 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..acd86a7 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,20 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..90b0f57 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..25ee64d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..380c8b4 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..82fa23e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..cb3cb54 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..8026a63 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..fccdb13 --- /dev/null +++ 
b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..54d364d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..b8b3ac6 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + 
digest: null + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..1749418 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..a564eaf --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,20 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml new file mode 100644 index 0000000..9f46b4e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml @@ -0,0 +1,8 @@ +# Left blank to test default values +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP diff --git 
a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..ec59235 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..17a11ac --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml new file mode 100644 index 0000000..fd8df8d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml 
b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9209ad5 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..cd9b323 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b48d93c --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml @@ -0,0 +1,16 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..2f332a7 --- /dev/null +++ 
b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..c51a4e9 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,19 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..56323c5 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,17 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..5b45b69 --- /dev/null +++ 
b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,15 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..ac0b6e6 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..6195bb3 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml new file mode 100644 index 0000000..95487b0 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml @@ -0,0 +1,12 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + extraEnvs: + - 
name: FOO + value: foo + - name: TEST + value: test + patch: + enabled: true diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..76669a5 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/override-values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/override-values.yaml new file mode 100644 index 0000000..e190f03 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/override-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + + service: + type: LoadBalancer + nodePorts: + http: "30000" + https: "30001" + tcp: {} + udp: {} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp.yaml new file mode 100644 index 0000000..2b28787 --- 
/dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp.yaml @@ -0,0 +1,724 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - 
watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new 
version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: 
ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: 
"registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We 
don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + 
helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: 
ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp2.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp2.yaml new file mode 100644 index 0000000..9ef52fc --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/temp2.yaml @@ -0,0 +1,725 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: 
true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + 
app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - 
leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: 
LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 30000 + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 30001 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key 
+ securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# 
https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + 
helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + 
- secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: 
"registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - 
--webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/NOTES.txt b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/NOTES.txt new file mode 100644 index 0000000..8985c56 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/NOTES.txt @@ -0,0 +1,80 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." 
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + +{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}} + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: example + namespace: foo + {{- if eq $isV1 false }} + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + {{- end }} + spec: + {{- if $isV1 }} + ingressClassName: {{ .Values.controller.ingressClassResource.name }} + {{- end }} + rules: + - host: www.example.com + http: + paths: + - pathType: Prefix + backend: + service: + name: exampleService + port: + number: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### 
WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_helpers.tpl b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext. 
+*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. +*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_params.tpl b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . 
}}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} 
+{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . 
}}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..7558e0b --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..0528215 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrole.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrole.yaml new file mode 100644 index 0000000..0e725ec --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) 
}} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml new file mode 100644 index 0000000..80c268f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: {{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + 
{{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-deployment.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-deployment.yaml new file mode 100644 index 0000000..5ad1867 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-deployment.yaml @@ -0,0 +1,228 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml 
.Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-hpa.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml new file mode 100644 index 0000000..9492784 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml @@ -0,0 +1,21 @@ +{{- if .Values.controller.ingressClassResource.enabled -}} +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ .Values.controller.ingressClassResource.name }} +{{- if .Values.controller.ingressClassResource.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +{{- end }} +spec: + controller: {{ .Values.controller.ingressClassResource.controllerValue }} + {{ template "ingressClass.parameters" . }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-keda.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-keda.yaml new file mode 100644 index 0000000..875157e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-keda.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +# https://keda.sh/docs/ + +apiVersion: {{ .Values.controller.keda.apiVersion }} +kind: ScaledObject +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.keda.scaledObject.annotations }} + annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: +{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }} + deploymentName: {{ include "ingress-nginx.controller.fullname" . }} +{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} +{{- end }} + pollingInterval: {{ .Values.controller.keda.pollingInterval }} + cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} + minReplicaCount: {{ .Values.controller.keda.minReplicas }} + maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} + triggers: +{{- with .Values.controller.keda.triggers }} +{{ toYaml . | indent 2 }} +{{ end }} + advanced: + restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }} +{{- if .Values.controller.keda.behavior }} + horizontalPodAutoscalerConfig: + behavior: +{{ with .Values.controller.keda.behavior -}} +{{ toYaml . | indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..78b5362 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . 
}} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-psp.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-psp.yaml new file mode 100644 index 0000000..2e0499c --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-psp.yaml @@ -0,0 +1,94 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-role.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-role.yaml new file mode 100644 index 0000000..330be8c --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-role.yaml @@ -0,0 +1,113 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.controller.electionID }} + 
verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if 
$.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..1c1d5bd --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http-metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: http-metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + 
selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + 
appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..824b2a1 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..973d36b --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{- else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . }} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml new file mode 100644 index 0000000..f74c2fb --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }} + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow + namespace: {{ .Release.Namespace }} +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} + policyTypes: + - Ingress + +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . 
}}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml new file mode 100644 index 0000000..c144c8f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml @@ -0,0 +1,38 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-role.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . 
}}-backend] + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-service.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml new file mode 100644 index 0000000..12e7a4f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml @@ -0,0 +1,10 @@ +{{- with .Values.dhParam -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ingress-nginx.controller.fullname" $ }} + labels: + {{- include "ingress-nginx.labels" $ | nindent 4 }} +data: + dhparam.pem: {{ . }} +{{- end }} diff --git a/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/values.yaml b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/values.yaml new file mode 100644 index 0000000..9ec174f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/files/ingress-nginx/values.yaml @@ -0,0 +1,944 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + image: + ## Keep false as default for now! 
+ chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.3.1" + digest: sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974 + digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1 + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + # -- Configures the controller container name + containerName: controller + + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + + # -- Optionally customize the pod hostname. + hostname: {} + + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: true + + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + + # -- Election ID to use for status update + electionID: ingress-controller-leader + + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + + # -- For backwards compatibility with ingress.class annotation, use ingressClass. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security Context policies for controller pods + podSecurityContext: {} + + # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. 
+ publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. + namespaceSelector: "" + + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # -- Additional command line arguments to pass to nginx-ingress-controller + # E.g. 
to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod 
anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 
10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. + ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + 
minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were + # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. 
+ # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. 
This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. 
+ extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available 
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. + ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # 
annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the Configmap feature + # worker-shutdown-timeout new value is 240s instead of 10s. + ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 
5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A 
base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/ansible/01_old/roles/agent_os_setting/handlers/main.yml b/ansible/01_old/roles/agent_os_setting/handlers/main.yml new file mode 100644 index 0000000..df2b47e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/handlers/main.yml @@ -0,0 +1,22 @@ +--- +- name: Reload systemd configuration + service: + daemon_reload: True + +- name: Restart containerd service + service: + name: containerd + enabled: true + state: restarted + +- name: Restart docker service + service: + name: docker + enabled: true + state: restarted + +- name: Restart crio service + service: + name: crio + enabled: true + state: restarted diff --git a/ansible/01_old/roles/agent_os_setting/meta/main.yml b/ansible/01_old/roles/agent_os_setting/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/01_old/roles/agent_os_setting/tasks/00-centos-os-main.yml b/ansible/01_old/roles/agent_os_setting/tasks/00-centos-os-main.yml new file mode 100644 index 0000000..699b484 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/00-centos-os-main.yml @@ -0,0 +1,81 @@ +--- +- name: Update and upgrade yum packages + yum: + name: "*" + state: latest + +- name: Install yum packages + yum: + name: ['cloud-utils', 'ca-certificates', 'socat', 'conntrack', 'gnupg', 'bash-completion', 'net-tools'] + state: present + +- name: Disable firewalld + systemd: name=firewalld state=stopped + ignore_errors: yes + tags: + - install + - atomic + - firewalld + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + become: true + lineinfile: + path: /etc/fstab + regexp: '^/dev/mapper/.*swap' + line: '# {{ item }}' + when: item is search('^/dev/mapper/.*swap') + loop: "{{ lookup('file', '/etc/fstab').split('\n') }}" + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + 
- 'br_netfilter' + +- name: Add br_netfilter to module autoload + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Disable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Disable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts + +- name: Disable SELinux + ansible.posix.selinux: + state: disabled diff --git a/ansible/01_old/roles/agent_os_setting/tasks/00-ubuntu-os-main.yml b/ansible/01_old/roles/agent_os_setting/tasks/00-ubuntu-os-main.yml new file mode 100644 index 0000000..526872c --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/00-ubuntu-os-main.yml @@ -0,0 +1,75 @@ +--- +- name: Update and upgrade apt packages + apt: + upgrade: yes + update_cache: yes + force_apt_get: yes + cache_valid_time: 86400 + +- name: Install apt packages + apt: + name: ['cloud-utils', 'apt-transport-https', 'ca-certificates', 'curl', 'socat', 'conntrack', 'gnupg', 'lsb-release', 'bash-completion', 'chrony'] + state: present + +- name: Disable ufw + command: 'ufw disable' + when: ansible_distribution_version == '20.04' + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s.conf + line: "{{ item }}" + 
create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Disable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Disable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts + +- name: Disable SELinux + ansible.posix.selinux: + state: disabled \ No newline at end of file diff --git a/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-containerd.yml b/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-containerd.yml new file mode 100644 index 0000000..3c6f2ba --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-containerd.yml @@ -0,0 +1,47 @@ +--- +- name: Add containerd yum repository + command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + yum: + name: ['containerd'] + state: present + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: 
+ name: kubernetes + description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes + notify: + - Reload systemd configuration \ No newline at end of file diff --git a/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-crio.yml b/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-crio.yml new file mode 100644 index 0000000..54a757d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-crio.yml @@ -0,0 +1,50 @@ +--- +- name: Add crio yum repository + command: sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo + +- name: Add crio yum repository + command: sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:1.23.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:1.23/CentOS_7/devel:kubic:libcontainers:stable:cri-o:1.23.repo + +- name: Create crio configuration directory + file: + path: /etc/containers/registries.conf.d + state: directory + +- name: Configure crio + template: + src: myregistry.conf.j2 + dest: /etc/containers/registries.conf.d/myregistry.conf + notify: + - Restart crio service + +- name: Install required packages + yum: + name: ['crio'] + state: present + notify: + - Reload systemd configuration + - Restart crio service + +- meta: flush_handlers + +- name: Enable crio service + service: + name: crio + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + description: kubernetes + baseurl: 
https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes + notify: + - Reload systemd configuration \ No newline at end of file diff --git a/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-docker.yml b/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-docker.yml new file mode 100644 index 0000000..d9a5881 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/01-centos-os-docker.yml @@ -0,0 +1,58 @@ +--- +- name: Add docker script + command: curl -fsSL https://get.docker.com -o /root/get-docker.sh + +- name: install docker + command: sh /root/get-docker.sh + +- name: Create docker configuration directory + file: + path: /etc/docker + state: directory + +#- name: Install required packages +# yum: +# name: ['docker-ce'] +# state: present +# notify: +# - Reload systemd configuration +# - Restart docker service + +- name: Configure docker + template: + src: daemon.json.j2 + dest: /etc/docker/daemon.json + notify: + - Reload systemd configuration + - Restart docker service + +#- name: Delete containerd config +# file: +# path: /etc/containerd/config.toml +# state: absent +# notify: +# - Restart containerd service + +- meta: flush_handlers + +- name: Enable docker service + service: + name: docker + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: 
['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes + notify: + - Reload systemd configuration \ No newline at end of file diff --git a/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-containerd.yml b/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-containerd.yml new file mode 100644 index 0000000..556485e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-containerd.yml @@ -0,0 +1,78 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: containerd_apt_repo_task + +- name: apt list --upgradable + command: apt list --upgradable + when: containerd_apt_repo_task.changed + +- name: apt update + apt: + update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: 
['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-crio.yml b/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-crio.yml new file mode 100644 index 0000000..9db8e7d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-crio.yml @@ -0,0 +1,84 @@ +--- +- name: Import GPG key_1 + apt_key: + url: https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:{{ crio.version }}/{{ crio.os }}/Release.key + state: present + become: true + +- name: Import GPG key_2 + apt_key: + url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ crio.os }}/Release.key + state: present + become: true + +- name: Add crio repository_1 + apt_repository: + repo: deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{crio.os }}/ / + state: present + filename: devel:kubic:libcontainers:stable.list + +- name: Add crio repository_2 + apt_repository: + repo: deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ crio.version }}/{{ crio.os }}/ / + state: present + filename: devel:kubic:libcontainers:stable:cri-o:{{ crio.version }}.list + +- name: Create crio configuration directory + file: + path: /etc/containers/registries.conf.d + state: directory + +- name: Configure crio + template: + src: myregistry.conf.j2 + dest: /etc/containers/registries.conf.d/myregistry.conf + notify: + - Restart crio service + +- name: Install required packages + apt: + name: ['cri-o', 'cri-o-runc'] + state: present + update_cache: yes + notify: + - Reload systemd configuration + - Restart crio service + +- meta: 
flush_handlers + +- name: Enable crio service + service: + name: crio + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-docker.yml b/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-docker.yml new file mode 100644 index 0000000..556485e --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/01-ubuntu-os-docker.yml @@ -0,0 +1,78 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: containerd_apt_repo_task + +- name: apt list --upgradable + command: apt list --upgradable + when: containerd_apt_repo_task.changed + +- name: apt update + apt: + update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + 
apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/ansible/01_old/roles/agent_os_setting/tasks/02-k8s-main.yml b/ansible/01_old/roles/agent_os_setting/tasks/02-k8s-main.yml new file mode 100644 index 0000000..bb7fa34 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/02-k8s-main.yml @@ -0,0 +1,42 @@ +--- +- name: Enable kubelet service + systemd: + name: kubelet + enabled: true + masked: false + +- name: Check if Kubernetes has already been initialized. + stat: + path: /etc/kubernetes/admin.conf + register: kubernetes_init_stat + +# Set up master. +- include_tasks: 03-k8s-master.yml + when: kubernetes_role == 'master' + +# Set up nodes. +- name: Get the kubeadm join command from the Kubernetes master. 
+ command: kubeadm token create --print-join-command + changed_when: false + when: kubernetes_role == 'master' + register: kubernetes_join_command_result + +- name: Get kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + flat: yes + when: kubernetes_role == 'master' + +- name: Set the kubeadm join command globally. + set_fact: + kubernetes_join_command: > + {{ kubernetes_join_command_result.stdout }} + {{ kubernetes_join_command_extra_opts }} + when: kubernetes_join_command_result.stdout is defined + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + +- include_tasks: 05-k8s-node.yml + when: kubernetes_role == 'node' diff --git a/ansible/01_old/roles/agent_os_setting/tasks/03-k8s-master.yml b/ansible/01_old/roles/agent_os_setting/tasks/03-k8s-master.yml new file mode 100644 index 0000000..954cdbb --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/03-k8s-master.yml @@ -0,0 +1,45 @@ +--- +- name: Initialize Kubernetes master with kubeadm init. + command: > + kubeadm init + --pod-network-cidr={{ kubernetes_pod_network.cidr }} + --apiserver-advertise-address={{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }} + {{ kubernetes_kubeadm_init_extra_opts }} + register: kubeadmin_init + when: not kubernetes_init_stat.stat.exists + +- name: Print the init output to screen. + debug: + var: kubeadmin_init.stdout + verbosity: 2 + when: not kubernetes_init_stat.stat.exists + +- name: Ensure .kube directory exists. + file: + path: ~/.kube + state: directory + +- name: Symlink the kubectl admin.conf to ~/.kube/conf. 
+ file: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/config + state: link + force: yes + +- name: copy the kubectl config to ~/.kube/ansible_config + copy: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + remote_src: true + +- name: Configure Calico networking and Metric Server + include_tasks: 04-k8s-master-yaml.yml + +- name: Kubectl Cheat Sheet + lineinfile: + path: ~/.bashrc + line: "{{ item }}" + with_items: + - source <(kubectl completion bash) + - alias k=kubectl + - complete -o default -F __start_kubectl k diff --git a/ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml b/ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml new file mode 100644 index 0000000..c52166f --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml @@ -0,0 +1,21 @@ +--- +- name: Copy calico yaml + template: + src: calico.yaml.j2 + dest: /tmp/calico.yaml + +- name: Copy metric server yaml + template: + src: components.yaml.j2 + dest: /tmp/components.yaml + +- name: Configure Calico networking. + command: kubectl apply -f /tmp/calico.yaml + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' + +- name: Configure Metric Server + command: kubectl apply -f /tmp/components.yaml + register: metric_server_result + changed_when: "'created' in metric_server_result.stdout" diff --git a/ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml_bak b/ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml_bak new file mode 100644 index 0000000..996a122 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/04-k8s-master-yaml.yml_bak @@ -0,0 +1,15 @@ +--- +- name: Configure Calico networking. 
+ command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_calico_manifest_file }} + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' + +- name: Configure Metric Server + command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_metric_server_file }} + register: metric_server_result + changed_when: "'created' in metric_server_result.stdout" diff --git a/ansible/01_old/roles/agent_os_setting/tasks/05-k8s-node.yml b/ansible/01_old/roles/agent_os_setting/tasks/05-k8s-node.yml new file mode 100644 index 0000000..304cbf1 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/05-k8s-node.yml @@ -0,0 +1,6 @@ +--- +- name: Join node to Kubernetes master + shell: > + {{ kubernetes_join_command }} + creates=/etc/kubernetes/kubelet.conf + tags: ['skip_ansible_lint'] diff --git a/ansible/01_old/roles/agent_os_setting/tasks/06-worker-directory.yml b/ansible/01_old/roles/agent_os_setting/tasks/06-worker-directory.yml new file mode 100644 index 0000000..3a6caf0 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/06-worker-directory.yml @@ -0,0 +1,24 @@ +--- +- name: make worker1 directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: u+rwx,g+rwx,o+rwx + recurse: yes + owner: root + group: root + with_items: + - /media/data + when: inventory_hostname in groups["worker1"] + +- name: make worker2 directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: u+rwx,g+rwx,o+rwx + recurse: yes + owner: root + group: root + with_items: + - /media/data + when: inventory_hostname in groups["worker2"] diff --git a/ansible/01_old/roles/agent_os_setting/tasks/main.yml b/ansible/01_old/roles/agent_os_setting/tasks/main.yml new file mode 100644 index 0000000..d027ae4 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- include: 00-centos-os-main.yml + tags: centos + when: 
ansible_distribution == 'CentOS' + +- include: 00-ubuntu-os-main.yml + tags: ubuntu + when: ansible_distribution == 'Ubuntu' + +- include: 01-centos-os-docker.yml + tags: cent-docker + when: ansible_distribution == 'CentOS' and runtime == 'docker' + +- include: 01-centos-os-containerd.yml + tags: cent-containerd + when: ansible_distribution == 'CentOS' and runtime == 'containerd' + +- include: 01-centos-os-crio.yml + tags: cent-crio + when: ansible_distribution == 'CentOS' and runtime == 'crio' + +- include: 01-ubuntu-os-docker.yml + tags: ubuntu-docker + when: ansible_distribution == 'Ubuntu' and runtime == 'docker' + +- include: 01-ubuntu-os-containerd.yml + tags: ubuntu-containerd + when: ansible_distribution == 'Ubuntu' and runtime == 'containerd' + +- include: 01-ubuntu-os-crio.yml + tags: ubuntu-crio + when: ansible_distribution == 'Ubuntu' and runtime == 'crio' + +- include: 02-k8s-main.yml + tags: k8s-main diff --git a/ansible/01_old/roles/agent_os_setting/templates/calico.yaml.j2 b/ansible/01_old/roles/agent_os_setting/templates/calico.yaml.j2 new file mode 100644 index 0000000..59cf309 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/calico.yaml.j2 @@ -0,0 +1,4779 @@ +--- +# Source: calico/templates/calico-kube-controllers.yaml +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers +--- +# Source: calico/templates/calico-kube-controllers.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-config.yaml 
+# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "0" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + ignoredInterfaces: + description: IgnoredInterfaces indicates the network interfaces that + needs to be excluded when reading device routes. 
+ items: + type: string + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. 
+ type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. 
+ properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. 
When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. 
For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + reachableBy: + description: Add an exact, i.e. /32, static route toward peer IP in + order to prevent route flapping. ReachableBy contains the address + of the gateway which peer can be reached by. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + ttlSecurity: + description: TTLSecurity enables the generalized TTL security mechanism + (GTSM) which protects against spoofed packets by ignoring received + packets with a smaller than expected TTL value. The provided value + is the number of hops (edges) between the peers. + type: integer + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. 
+ type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. 
+ properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. 
+ properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). 
+ type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all host interfaces + with BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled, Strict or Loose. [Default: + Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing interpreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfHostConntrackBypass: + description: 'BPFHostConntrackBypass Controls whether to bypass Linux + conntrack in BPF mode for workloads and services. 
[Default: true + - bypass Linux conntrack]' + type: boolean + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfL3IfacePattern: + description: BPFL3IfacePattern is a regular expression that allows + to list tunnel devices like wireguard or vxlan (i.e., L3 devices) + in addition to BPFDataIfacePattern. That is, tunnel interfaces not + created by Calico, that Calico workload traffic flows over as well + as any interfaces that handle incoming traffic to nodeports and + services from outside the cluster. + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. 
The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeIfState: + description: BPFMapSizeIfState sets the size for ifstate map. The + ifstate map must be large enough to hold an entry for each device + (host + workloads) on a host. + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. 
[Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: BPFPolicyDebugEnabled when true, Felix records detailed + information about the BPF policy programs, which can be examined + with the calico-bpf command-line tool. + type: boolean + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: "DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix's (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s] \n Deprecated: replaced + by the generic HealthTimeoutOverrides." + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. 
If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. 
To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override feature detection + based on auto-detected platform capabilities. Values are specified + in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" + or "false" will force the feature, empty or omitted values are auto-detected. + type: string + featureGates: + description: FeatureGates is used to enable or disable tech-preview + Calico features. 
Values are specified in a comma separated list + with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". + This is used to enable features that are not fully production ready. + type: string + floatingIPs: + description: FloatingIPs configures whether or not Felix will program + non-OpenStack floating IP addresses. (OpenStack-derived floating + IPs are always programmed, regardless of this setting.) + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + healthTimeoutOverrides: + description: HealthTimeoutOverrides allows the internal watchdog timeouts + of individual subcomponents to be overriden. This is useful for + working around "false positive" liveness timeouts that can occur + in particularly stressful workloads or if CPU is constrained. For + a list of active subcomponents, see Felix's logs. + items: + properties: + name: + type: string + timeout: + type: string + required: + - name + - timeout + type: object + type: array + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. 
[Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is Auto. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. 
Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. 
[Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). 
A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. 
+ [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. 
[Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeSyncDisabled: + description: RouteSyncDisabled will disable all operations performed + on the route table. Set to true to run in network-policy mode only. + type: boolean + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. 
[Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix + determines this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled + for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). 
+ [Default: false]' + type: boolean + wireguardEnabledV6: + description: 'WireguardEnabledV6 controls whether Wireguard is enabled + for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the IPv4 Wireguard interface. [Default: wireguard.cali]' + type: string + wireguardInterfaceNameV6: + description: 'WireguardInterfaceNameV6 specifies the name to use for + the IPv6 Wireguard interface. [Default: wg-v6.cali]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by IPv4 Wireguard. [Default: 51820]' + type: integer + wireguardListeningPortV6: + description: 'WireguardListeningPortV6 controls the listening port + used by IPv6 Wireguard. [Default: 51821]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the IPv4 Wireguard + interface. See Configuring MTU [Default: 1440]' + type: integer + wireguardMTUV6: + description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard + interface. See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. 
+ type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. 
+ type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. 
\n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. 
+ type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. 
\n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. 
\n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. 
\n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). 
+ type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... 
} -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. 
(If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + type: integer + # TODO: This nullable is manually added in. 
We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. 
+ items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. 
+ maximum: 2147483647 + minimum: 0 + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. 
+ type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When natOutgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). 
+ type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. 
[Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. 
[Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. 
[Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. 
[Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. 
+ type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. 
\n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. 
\n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. 
\n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). 
+ type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! 
expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are watched to check for existence as part of IPAM controller. + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # The CNI plugin and calico/node need to be able to create a default + # IPAMConfiguration + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + - create + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. 
+ - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 
+ terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: docker.io/calico/cni:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: docker.io/calico/cni:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. 
+ - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: docker.io/calico/node:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: docker.io/calico/node:v3.25.0 + imagePullPolicy: IfNotPresent + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. 
+ - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" + # Enable or Disable VXLAN on the default IPv6 IP pool. + - name: CALICO_IPV6POOL_VXLAN + value: "Never" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. 
+ - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: docker.io/calico/kube-controllers:v3.25.0 + imagePullPolicy: IfNotPresent + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 diff --git a/ansible/01_old/roles/agent_os_setting/templates/components.yaml.j2 b/ansible/01_old/roles/agent_os_setting/templates/components.yaml.j2 new file mode 100644 index 0000000..787f274 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/components.yaml.j2 @@ -0,0 +1,197 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: 
metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: 
tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 diff --git a/ansible/01_old/roles/agent_os_setting/templates/config.toml.j2 b/ansible/01_old/roles/agent_os_setting/templates/config.toml.j2 new file mode 100644 index 0000000..0217565 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/config.toml.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% from 'yaml2toml_macro.j2' import yaml2toml with context -%} + +{{ yaml2toml(containerd_config) }} diff --git a/ansible/01_old/roles/agent_os_setting/templates/daemon.json.j2 b/ansible/01_old/roles/agent_os_setting/templates/daemon.json.j2 new file mode 100644 index 0000000..6c2b554 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/daemon.json.j2 @@ -0,0 +1,9 @@ +{ + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + "log-opts": { + "max-size": "100m" + }, + "storage-driver": "overlay2", + "insecure-registries": ["10.10.31.243:5000"] +} diff --git a/ansible/01_old/roles/agent_os_setting/templates/hosts.j2 b/ansible/01_old/roles/agent_os_setting/templates/hosts.j2 new file mode 100644 index 0000000..18804b7 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/hosts.j2 @@ -0,0 +1,6 @@ +127.0.0.1 localhost +::1 localhost + +{% for host in groups.all %} +{{ hostvars[host].ansible_default_ipv4.address }} {{ hostvars[host].ansible_fqdn }} {{ hostvars[host].ansible_hostname }} +{%endfor%} diff --git a/ansible/01_old/roles/agent_os_setting/templates/myregistry.conf.j2 
b/ansible/01_old/roles/agent_os_setting/templates/myregistry.conf.j2 new file mode 100644 index 0000000..687d62d --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/myregistry.conf.j2 @@ -0,0 +1,3 @@ +[[registry]] +location = "10.10.31.243:5000" +insecure = true \ No newline at end of file diff --git a/ansible/01_old/roles/agent_os_setting/templates/yaml2toml_macro.j2 b/ansible/01_old/roles/agent_os_setting/templates/yaml2toml_macro.j2 new file mode 100644 index 0000000..33f69d0 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/templates/yaml2toml_macro.j2 @@ -0,0 +1,58 @@ +{%- macro yaml2inline_toml(item, depth) -%} + {%- if item is string or item is number -%} + {#- First, process all primitive types. -#} + {{ item | to_json }} + {%- elif item is mapping -%} + {#- Second, process all mappings. -#} + {#- Note that inline mappings must not contain newlines (except inside contained lists). -#} + {{ "{" }} + {%- for key, value in item.items() | sort -%} + {{ " " + + (key | to_json) + + " = " + + yaml2inline_toml(value, depth) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ " }" }} + {%- else -%} + {#- Third, process all lists. -#} + {%- if item | length == 0 -%}{{ "[]" }}{%- else -%} + {{ "[" }} + {%- for entry in item -%} + {{ "\n" + + (" " * (depth + 1)) + + yaml2inline_toml(entry, depth + 1) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ "\n" + (" " * depth) + "]" }} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro yaml2toml(item, super_keys=[]) -%} + {%- for key, value in item.items() | sort -%} + {%- if value is not mapping -%} + {#- First, process all non-mappings. -#} + {{ (" " * (super_keys | length)) + + (key | to_json) + + " = " + + (yaml2inline_toml(value, super_keys | length)) + + "\n" + }} + {%- endif -%} + {%- endfor -%} + {%- for key, value in item.items() | sort -%} + {%- if value is mapping -%} + {#- Second, process all mappings. 
-#} + {{ "\n" + + (" " * (super_keys | length)) + + "[" + + ((super_keys+[key]) | map('to_json') | join(".")) + + "]\n" + + yaml2toml(value, super_keys+[key]) + }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} diff --git a/ansible/01_old/roles/agent_os_setting/tests/inventory b/ansible/01_old/roles/agent_os_setting/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/01_old/roles/agent_os_setting/tests/test.yml b/ansible/01_old/roles/agent_os_setting/tests/test.yml new file mode 100644 index 0000000..191e731 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - apache diff --git a/ansible/01_old/roles/agent_os_setting/vars/main.yml b/ansible/01_old/roles/agent_os_setting/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/ansible/01_old/roles/agent_os_setting/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for apache diff --git a/ansible/01_old/roles/api_os_setting/defaults/main.yml b/ansible/01_old/roles/api_os_setting/defaults/main.yml new file mode 100644 index 0000000..01b6cb8 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/defaults/main.yml @@ -0,0 +1,140 @@ +helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f +helm_version: v3.10.2 + +kubernetes_version: 1.25.2 + +kubernetes_kubelet_extra_args: "" +kubernetes_kubeadm_init_extra_opts: "" +kubernetes_join_command_extra_opts: "" + +kubernetes_pod_network: + cni: 'calico' + cidr: '10.96.0.0/12' + +kubernetes_calico_manifest_file: "{{ role_path }}/files/calico.yaml" + +kubernetes_metric_server_file: "{{ role_path }}/files/components.yaml" + +containerd_config: + version: 2 + root: /var/lib/containerd + state: /run/containerd + plugin_dir: "" + disabled_plugins: [] + required_plugins: [] + oom_score: 0 + grpc: + address: 
/run/containerd/containerd.sock + tcp_address: "" + tcp_tls_cert: "" + tcp_tls_key: "" + uid: 0 + gid: 0 + max_recv_message_size: 16777216 + max_send_message_size: 16777216 + ttrpc: + address: "" + uid: 0 + gid: 0 + debug: + address: "" + uid: 0 + gid: 0 + level: "" + metrics: + address: "" + grpc_histogram: false + cgroup: + path: "" + timeouts: + "io.containerd.timeout.shim.cleanup": 5s + "io.containerd.timeout.shim.load": 5s + "io.containerd.timeout.shim.shutdown": 3s + "io.containerd.timeout.task.state": 2s + plugins: + "io.containerd.gc.v1.scheduler": + pause_threshold: 0.02 + deletion_threshold: 0 + mutation_threshold: 100 + schedule_delay: 0s + startup_delay: 100ms + "io.containerd.grpc.v1.cri": + disable_tcp_service: true + stream_server_address: 127.0.0.1 + stream_server_port: "0" + stream_idle_timeout: 4h0m0s + enable_selinux: false + sandbox_image: k8s.gcr.io/pause:3.1 + stats_collect_period: 10 + systemd_cgroup: false + enable_tls_streaming: false + max_container_log_line_size: 16384 + disable_cgroup: false + disable_apparmor: false + restrict_oom_score_adj: false + max_concurrent_downloads: 3 + disable_proc_mount: false + containerd: + snapshotter: overlayfs + default_runtime_name: runc + no_pivot: false + default_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + untrusted_workload_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + runtimes: + runc: + runtime_type: io.containerd.runc.v1 + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + cni: + bin_dir: /opt/cni/bin + conf_dir: /etc/cni/net.d + max_conf_num: 1 + conf_template: "" + registry: + configs: + "10.10.31.243:5000": + tls: + insecure_skip_verify: true + mirrors: + "docker.io": + endpoint: + - https://registry-1.docker.io + "10.10.31.243:5000": + endpoint: + - http://10.10.31.243:5000 + x509_key_pair_streaming: + tls_cert_file: "" + 
tls_key_file: "" + "io.containerd.internal.v1.opt": + path: /opt/containerd + "io.containerd.internal.v1.restart": + interval: 10s + "io.containerd.metadata.v1.bolt": + content_sharing_policy: shared + "io.containerd.monitor.v1.cgroups": + no_prometheus: false + "io.containerd.runtime.v1.linux": + shim: containerd-shim + runtime: runc + runtime_root: "" + no_shim: false + shim_debug: false + "io.containerd.runtime.v2.task": + platforms: + - linux/amd64 + "io.containerd.service.v1.diff-service": + default: + - walking + "io.containerd.snapshotter.v1.devmapper": + root_path: "" + pool_name: "" + base_image_size: "" diff --git a/ansible/01_old/roles/api_os_setting/files/get-docker.sh b/ansible/01_old/roles/api_os_setting/files/get-docker.sh new file mode 100755 index 0000000..e8586ff --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/files/get-docker.sh @@ -0,0 +1,645 @@ +#!/bin/sh +set -e +# Docker CE for Linux installation script +# +# See https://docs.docker.com/engine/install/ for the installation steps. +# +# This script is meant for quick & easy install via: +# $ curl -fsSL https://get.docker.com -o get-docker.sh +# $ sh get-docker.sh +# +# For test builds (ie. release candidates): +# $ curl -fsSL https://test.docker.com -o test-docker.sh +# $ sh test-docker.sh +# +# NOTE: Make sure to verify the contents of the script +# you downloaded matches the contents of install.sh +# located at https://github.com/docker/docker-install +# before executing. 
+# +# Git commit from https://github.com/docker/docker-install when +# the script was uploaded (Should only be modified by upload job): +SCRIPT_COMMIT_SHA="66474034547a96caa0a25be56051ff8b726a1b28" + +# strip "v" prefix if present +VERSION="${VERSION#v}" + +# The channel to install from: +# * nightly +# * test +# * stable +# * edge (deprecated) +DEFAULT_CHANNEL_VALUE="stable" +if [ -z "$CHANNEL" ]; then + CHANNEL=$DEFAULT_CHANNEL_VALUE +fi + +DEFAULT_DOWNLOAD_URL="https://download.docker.com" +if [ -z "$DOWNLOAD_URL" ]; then + DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL +fi + +DEFAULT_REPO_FILE="docker-ce.repo" +if [ -z "$REPO_FILE" ]; then + REPO_FILE="$DEFAULT_REPO_FILE" +fi + +mirror='' +DRY_RUN=${DRY_RUN:-} +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + --dry-run) + DRY_RUN=1 + ;; + --*) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + Aliyun) + DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce" + ;; + AzureChinaCloud) + DOWNLOAD_URL="https://mirror.azure.cn/docker-ce" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +# version_gte checks if the version specified in $VERSION is at least +# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either +# unset (=latest) or newer or equal than the specified version. Returns 1 (fail) +# otherwise. +# +# examples: +# +# VERSION=20.10 +# version_gte 20.10 // 0 (success) +# version_gte 19.03 // 0 (success) +# version_gte 21.10 // 1 (fail) +version_gte() { + if [ -z "$VERSION" ]; then + return 0 + fi + eval calver_compare "$VERSION" "$1" +} + +# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success) +# if version A is newer or equal than version B, or 1 (fail) otherwise. 
Patch +# releases and pre-release (-alpha/-beta) are not taken into account +# +# examples: +# +# calver_compare 20.10 19.03 // 0 (success) +# calver_compare 20.10 20.10 // 0 (success) +# calver_compare 19.03 20.10 // 1 (fail) +calver_compare() ( + set +x + + yy_a="$(echo "$1" | cut -d'.' -f1)" + yy_b="$(echo "$2" | cut -d'.' -f1)" + if [ "$yy_a" -lt "$yy_b" ]; then + return 1 + fi + if [ "$yy_a" -gt "$yy_b" ]; then + return 0 + fi + mm_a="$(echo "$1" | cut -d'.' -f2)" + mm_b="$(echo "$2" | cut -d'.' -f2)" + if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then + return 1 + fi + + return 0 +) + +is_dry_run() { + if [ -z "$DRY_RUN" ]; then + return 1 + else + return 0 + fi +} + +is_wsl() { + case "$(uname -r)" in + *microsoft* ) true ;; # WSL 2 + *Microsoft* ) true ;; # WSL 1 + * ) false;; + esac +} + +is_darwin() { + case "$(uname -s)" in + *darwin* ) true ;; + *Darwin* ) true ;; + * ) false;; + esac +} + +deprecation_notice() { + distro=$1 + distro_version=$2 + echo + printf "\033[91;1mDEPRECATION WARNING\033[0m\n" + printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version" + echo " No updates or security fixes will be released for this distribution, and users are recommended" + echo " to upgrade to a currently maintained version of $distro." + echo + printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue." + echo + sleep 10 +} + +get_distribution() { + lsb_dist="" + # Every system that we officially support has /etc/os-release + if [ -r /etc/os-release ]; then + lsb_dist="$(. 
/etc/os-release && echo "$ID")" + fi + # Returning an empty string here should be alright since the + # case statements don't act unless you provide an actual value + echo "$lsb_dist" +} + +echo_docker_as_nonroot() { + if is_dry_run; then + return + fi + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + echo + echo "================================================================================" + echo + if version_gte "20.10"; then + echo "To run Docker as a non-privileged user, consider setting up the" + echo "Docker daemon in rootless mode for your user:" + echo + echo " dockerd-rootless-setuptool.sh install" + echo + echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode." + echo + fi + echo + echo "To run the Docker daemon as a fully privileged service, but granting non-root" + echo "users access, refer to https://docs.docker.com/go/daemon-access/" + echo + echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent" + echo " to root access on the host. Refer to the 'Docker daemon attack surface'" + echo " documentation for details: https://docs.docker.com/go/attack-surface/" + echo + echo "================================================================================" + echo +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. 
+ EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + if [ "$lsb_dist" = "osmc" ]; then + # OSMC runs Raspbian + lsb_dist=raspbian + else + # We're Debian and don't even know it! + lsb_dist=debian + fi + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + fi + fi + fi +} + +do_install() { + echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA" + + if command_exists docker; then + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + again to update Docker, you can safely ignore this message. + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. 
+ EOF + exit 1 + fi + fi + + if is_dry_run; then + sh_c="echo" + fi + + # perform some very rudimentary platform detection + lsb_dist=$( get_distribution ) + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + if is_wsl; then + echo + echo "WSL DETECTED: We recommend using Docker Desktop for Windows." + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + ;; + + centos|rhel|sles) + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --release | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + esac + + # Check if this is a forked Linux distro + check_forked + + # Print deprecation warnings for distro versions that recently reached EOL, + # but may still be commonly used (especially LTS versions). 
+ case "$lsb_dist.$dist_version" in + debian.stretch|debian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + raspbian.stretch|raspbian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + ubuntu.xenial|ubuntu.trusty) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + fedora.*) + if [ "$dist_version" -lt 33 ]; then + deprecation_notice "$lsb_dist" "$dist_version" + fi + ;; + esac + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + pre_reqs="apt-transport-https ca-certificates curl" + if ! command -v gpg > /dev/null; then + pre_reqs="$pre_reqs gnupg" + fi + apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL" + ( + if ! is_dry_run; then + set -x + fi + $sh_c 'apt-get update -qq >/dev/null' + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null" + $sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings' + $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg" + $sh_c "chmod a+r /etc/apt/keyrings/docker.gpg" + $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list" + $sh_c 'apt-get update -qq >/dev/null' + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g")" + search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst apt-cache madison results" + echo + exit 1 + fi 
+ if version_gte "18.09"; then + search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + echo "INFO: $search_command" + cli_pkg_version="=$($sh_c "$search_command")" + fi + pkg_version="=$pkg_version" + fi + fi + ( + pkgs="docker-ce${pkg_version%=}" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io" + fi + if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then + # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently) + pkgs="$pkgs docker-scan-plugin" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pkgs >/dev/null" + ) + echo_docker_as_nonroot + exit 0 + ;; + centos|fedora|rhel) + if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then + echo "Packages for RHEL are currently only available for s390x." + exit 1 + fi + if [ "$lsb_dist" = "fedora" ]; then + pkg_manager="dnf" + config_manager="dnf config-manager" + enable_channel_flag="--set-enabled" + disable_channel_flag="--set-disabled" + pre_reqs="dnf-plugins-core" + pkg_suffix="fc$dist_version" + else + pkg_manager="yum" + config_manager="yum-config-manager" + enable_channel_flag="--enable" + disable_channel_flag="--disable" + pre_reqs="yum-utils" + pkg_suffix="el" + fi + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + ( + if ! 
is_dry_run; then + set -x + fi + $sh_c "$pkg_manager install -y -q $pre_reqs" + $sh_c "$config_manager --add-repo $repo_file_url" + + if [ "$CHANNEL" != "stable" ]; then + $sh_c "$config_manager $disable_channel_flag docker-ce-*" + $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL" + fi + $sh_c "$pkg_manager makecache" + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix" + search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst $pkg_manager list results" + echo + exit 1 + fi + if version_gte "18.09"; then + # older versions don't support a cli package + search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)" + fi + # Cut out the epoch and prefix with a '-' + pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + if [ -n "$cli_pkg_version" ]; then + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then + # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently) + pkgs="$pkgs docker-scan-plugin" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs 
docker-buildx-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "$pkg_manager install -y -q $pkgs" + ) + echo_docker_as_nonroot + exit 0 + ;; + sles) + if [ "$(uname -m)" != "s390x" ]; then + echo "Packages for SLES are currently only available for s390x" + exit 1 + fi + if [ "$dist_version" = "15.3" ]; then + sles_version="SLE_15_SP3" + else + sles_minor_version="${dist_version##*.}" + sles_version="15.$sles_minor_version" + fi + opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/$sles_version/security:SELinux.repo" + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + pre_reqs="ca-certificates curl libseccomp2 awk" + ( + if ! is_dry_run; then + set -x + fi + $sh_c "zypper install -y $pre_reqs" + $sh_c "zypper addrepo $repo_file_url" + if ! is_dry_run; then + cat >&2 <<-'EOF' + WARNING!! + openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now. + Do you wish to continue? + You may press Ctrl+C now to abort this script. 
+ EOF + ( set -x; sleep 30 ) + fi + $sh_c "zypper addrepo $opensuse_repo" + $sh_c "zypper --gpg-auto-import-keys refresh" + $sh_c "zypper lr -d" + ) + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")" + search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst zypper list results" + echo + exit 1 + fi + search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'" + # It's okay for cli_pkg_version to be blank, since older versions don't support a cli package + cli_pkg_version="$($sh_c "$search_command")" + pkg_version="-$pkg_version" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + if [ -n "$cli_pkg_version" ]; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if ! 
is_dry_run; then + set -x + fi + $sh_c "zypper -q install -y $pkgs" + ) + echo_docker_as_nonroot + exit 0 + ;; + *) + if [ -z "$lsb_dist" ]; then + if is_darwin; then + echo + echo "ERROR: Unsupported operating system 'macOS'" + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + exit 1 + fi + fi + echo + echo "ERROR: Unsupported distribution '$lsb_dist'" + echo + exit 1 + ;; + esac + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/ansible/01_old/roles/api_os_setting/handlers/main.yml b/ansible/01_old/roles/api_os_setting/handlers/main.yml new file mode 100644 index 0000000..df2b47e --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/handlers/main.yml @@ -0,0 +1,22 @@ +--- +- name: Reload systemd configuration + service: + daemon_reload: True + +- name: Restart containerd service + service: + name: containerd + enabled: true + state: restarted + +- name: Restart docker service + service: + name: docker + enabled: true + state: restarted + +- name: Restart crio service + service: + name: crio + enabled: true + state: restarted diff --git a/ansible/01_old/roles/api_os_setting/meta/main.yml b/ansible/01_old/roles/api_os_setting/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a 
Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
diff --git a/ansible/01_old/roles/api_os_setting/tasks/00-centos-os-main.yml b/ansible/01_old/roles/api_os_setting/tasks/00-centos-os-main.yml new file mode 100644 index 0000000..9b831b8 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/00-centos-os-main.yml @@ -0,0 +1,82 @@ +--- +- name: Update and upgrade yum packages + yum: + name: "*" + state: latest + +- name: Install yum packages + yum: + name: ['cloud-utils', 'ca-certificates', 'socat', 'conntrack', 'gnupg', 'bash-completion'] + state: present + +- name: Disable firewalld + systemd: name=firewalld state=stopped + ignore_errors: yes + tags: + - install + - atomic + - firewalld + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + + # - name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + # replace: + # path: /etc/fstab + # regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + # replace: '# \1' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + become: true + replace: + path: /etc/fstab + regexp: '^([^#].*\sswap\s.*)$' + replace: '# \1' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Load overlay and br_netfilter kernel modules + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Persist kubernetes sysctl parameters + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Enable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 
'net.bridge.bridge-nf-call-ip6tables' + +- name: Disable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/ansible/01_old/roles/api_os_setting/tasks/00-ubuntu-os-main.yml b/ansible/01_old/roles/api_os_setting/tasks/00-ubuntu-os-main.yml new file mode 100644 index 0000000..8c460d5 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/00-ubuntu-os-main.yml @@ -0,0 +1,71 @@ +--- +- name: Update and upgrade apt packages + apt: + upgrade: yes + update_cache: yes + force_apt_get: yes + cache_valid_time: 86400 + +- name: Install apt packages + apt: + name: ['cloud-utils', 'apt-transport-https', 'ca-certificates', 'curl', 'socat', 'conntrack', 'gnupg', 'lsb-release', 'bash-completion', 'chrony'] + state: present + +- name: Disable ufw + command: 'ufw disable' + when: ansible_distribution_version == '20.04' + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Disable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + 
+- name: Disable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-containerd.yml b/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-containerd.yml new file mode 100644 index 0000000..3c6f2ba --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-containerd.yml @@ -0,0 +1,47 @@ +--- +- name: Add containerd yum repository + command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + yum: + name: ['containerd'] + state: present + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes + notify: + - Reload systemd configuration \ No newline at end of file diff --git a/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-crio.yml b/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-crio.yml new file mode 100644 index 0000000..c78346d --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-crio.yml @@ -0,0 +1,53 @@ +--- +- 
name: Add crio yum repository + command: sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo + +- name: Add crio yum repository + command: sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:1.23.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:1.23/CentOS_7/devel:kubic:libcontainers:stable:cri-o:1.23.repo + +- name: Create crio configuration directory + file: + path: /etc/containers/registries.conf.d + state: directory + +- name: Configure crio + template: + src: myregistry.conf.j2 + dest: /etc/containers/registries.conf.d/myregistry.conf + notify: + - Restart crio service + +- name: Install required packages + yum: + name: ['crio'] + state: present + notify: + - Reload systemd configuration + - Restart crio service + +- meta: flush_handlers + +- name: Enable crio service + service: + name: crio + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes + notify: + - Reload systemd configuration + +- name: Manual Daemon Reload + command: systemctl daemon-reload diff --git a/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-docker.yml b/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-docker.yml new file mode 100644 index 0000000..d9a5881 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/01-centos-os-docker.yml @@ -0,0 +1,58 @@ +--- +- name: Add docker script + command: curl -fsSL 
https://get.docker.com -o /root/get-docker.sh + +- name: install docker + command: sh /root/get-docker.sh + +- name: Create docker configuration directory + file: + path: /etc/docker + state: directory + +#- name: Install required packages +# yum: +# name: ['docker-ce'] +# state: present +# notify: +# - Reload systemd configuration +# - Restart docker service + +- name: Configure docker + template: + src: daemon.json.j2 + dest: /etc/docker/daemon.json + notify: + - Reload systemd configuration + - Restart docker service + +#- name: Delete containerd config +# file: +# path: /etc/containerd/config.toml +# state: absent +# notify: +# - Restart containerd service + +- meta: flush_handlers + +- name: Enable docker service + service: + name: docker + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes + notify: + - Reload systemd configuration \ No newline at end of file diff --git a/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-containerd.yml b/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-containerd.yml new file mode 100644 index 0000000..556485e --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-containerd.yml @@ -0,0 +1,78 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: 
containerd_apt_repo_task + +- name: apt list --upgradable + command: apt list --upgradable + when: containerd_apt_repo_task.changed + +- name: apt update + apt: + update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-crio.yml b/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-crio.yml new file mode 100644 index 0000000..9db8e7d --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-crio.yml @@ -0,0 +1,84 @@ +--- +- name: Import GPG key_1 + apt_key: + url: https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:{{ crio.version }}/{{ crio.os }}/Release.key + state: present + become: true + +- name: Import GPG 
key_2 + apt_key: + url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ crio.os }}/Release.key + state: present + become: true + +- name: Add crio repository_1 + apt_repository: + repo: deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{crio.os }}/ / + state: present + filename: devel:kubic:libcontainers:stable.list + +- name: Add crio repository_2 + apt_repository: + repo: deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ crio.version }}/{{ crio.os }}/ / + state: present + filename: devel:kubic:libcontainers:stable:cri-o:{{ crio.version }}.list + +- name: Create crio configuration directory + file: + path: /etc/containers/registries.conf.d + state: directory + +- name: Configure crio + template: + src: myregistry.conf.j2 + dest: /etc/containers/registries.conf.d/myregistry.conf + notify: + - Restart crio service + +- name: Install required packages + apt: + name: ['cri-o', 'cri-o-runc'] + state: present + update_cache: yes + notify: + - Reload systemd configuration + - Restart crio service + +- meta: flush_handlers + +- name: Enable crio service + service: + name: crio + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git 
a/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-docker.yml b/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-docker.yml new file mode 100644 index 0000000..df89c16 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/01-ubuntu-os-docker.yml @@ -0,0 +1,50 @@ +--- +- name: Add docker script + command: curl -fsSL https://get.docker.com -o /root/get-docker.sh + +- name: install docker + command: sh /root/get-docker.sh + +- name: Create docker configuration directory + file: + path: /etc/docker + state: directory + +- name: Configure docker + template: + src: daemon.json.j2 + dest: /etc/docker/daemon.json + notify: + - Reload systemd configuration + - Restart docker service + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/ansible/01_old/roles/api_os_setting/tasks/02-k8s-main.yml b/ansible/01_old/roles/api_os_setting/tasks/02-k8s-main.yml new file mode 100644 index 0000000..2b4a948 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/02-k8s-main.yml @@ -0,0 +1,43 @@ +--- +- name: Enable kubelet service + systemd: + name: kubelet + enabled: true + masked: false + +- name: Check if Kubernetes has already been initialized. + stat: + path: /etc/kubernetes/admin.conf + register: kubernetes_init_stat + +# Set up master. 
+- include_tasks: 03-k8s-master.yml + when: kubernetes_role == 'master' + +# Set up nodes. +- name: Get the kubeadm join command from the Kubernetes master. + command: kubeadm token create --print-join-command + changed_when: false + when: kubernetes_role == 'master' + register: kubernetes_join_command_result + +- name: Get kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: /tmp/ansible_config + flat: yes + when: kubernetes_role == 'master' + +- name: Set the kubeadm join command globally. + set_fact: + kubernetes_join_command: > + {{ kubernetes_join_command_result.stdout }} + {{ kubernetes_join_command_extra_opts }} + when: kubernetes_join_command_result.stdout is defined + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + +- include_tasks: 05-k8s-node.yml + when: kubernetes_role == 'node' + diff --git a/ansible/01_old/roles/api_os_setting/tasks/03-k8s-master.yml b/ansible/01_old/roles/api_os_setting/tasks/03-k8s-master.yml new file mode 100644 index 0000000..8a841c0 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/03-k8s-master.yml @@ -0,0 +1,51 @@ +--- +- name: Initialize Kubernetes master with kubeadm init. + command: > + kubeadm init + --pod-network-cidr={{ kubernetes_pod_network.cidr }} + --apiserver-advertise-address={{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }} + {{ kubernetes_kubeadm_init_extra_opts }} + register: kubeadmin_init + when: not kubernetes_init_stat.stat.exists + +- name: Print the init output to screen. + debug: + var: kubeadmin_init.stdout + verbosity: 2 + when: not kubernetes_init_stat.stat.exists + +- name: Ensure .kube directory exists. + file: + path: ~/.kube + state: directory + +- name: Symlink the kubectl admin.conf to ~/.kube/conf. 
+ file: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/config + state: link + force: yes + +- name: copy the kubectl config to ~/.kube/ansible_config + copy: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + remote_src: true + +- name: Get kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + flat: yes + +- name: Configure Calico networking and Metric Server + include_tasks: 04-k8s-master-yaml.yml + +- name: Kubectl Cheat Sheet + lineinfile: + path: ~/.bashrc + line: "{{ item }}" + with_items: + - source <(kubectl completion bash) + - alias k=kubectl + - complete -o default -F __start_kubectl k diff --git a/ansible/01_old/roles/api_os_setting/tasks/04-k8s-master-yaml.yml b/ansible/01_old/roles/api_os_setting/tasks/04-k8s-master-yaml.yml new file mode 100644 index 0000000..c52166f --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/04-k8s-master-yaml.yml @@ -0,0 +1,21 @@ +--- +- name: Copy calico yaml + template: + src: calico.yaml.j2 + dest: /tmp/calico.yaml + +- name: Copy metric server yaml + template: + src: components.yaml.j2 + dest: /tmp/components.yaml + +- name: Configure Calico networking. 
+ command: kubectl apply -f /tmp/calico.yaml + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' + +- name: Configure Metric Server + command: kubectl apply -f /tmp/components.yaml + register: metric_server_result + changed_when: "'created' in metric_server_result.stdout" diff --git a/ansible/01_old/roles/api_os_setting/tasks/05-k8s-node.yml b/ansible/01_old/roles/api_os_setting/tasks/05-k8s-node.yml new file mode 100644 index 0000000..304cbf1 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/05-k8s-node.yml @@ -0,0 +1,6 @@ +--- +- name: Join node to Kubernetes master + shell: > + {{ kubernetes_join_command }} + creates=/etc/kubernetes/kubelet.conf + tags: ['skip_ansible_lint'] diff --git a/ansible/01_old/roles/api_os_setting/tasks/main.yml b/ansible/01_old/roles/api_os_setting/tasks/main.yml new file mode 100644 index 0000000..d027ae4 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- import_tasks: 00-centos-os-main.yml + tags: centos + when: ansible_distribution == 'CentOS' + +- import_tasks: 00-ubuntu-os-main.yml + tags: ubuntu + when: ansible_distribution == 'Ubuntu' + +- import_tasks: 01-centos-os-docker.yml + tags: cent-docker + when: ansible_distribution == 'CentOS' and runtime == 'docker' + +- import_tasks: 01-centos-os-containerd.yml + tags: cent-containerd + when: ansible_distribution == 'CentOS' and runtime == 'containerd' + +- import_tasks: 01-centos-os-crio.yml + tags: cent-crio + when: ansible_distribution == 'CentOS' and runtime == 'crio' + +- import_tasks: 01-ubuntu-os-docker.yml + tags: ubuntu-docker + when: ansible_distribution == 'Ubuntu' and runtime == 'docker' + +- import_tasks: 01-ubuntu-os-containerd.yml + tags: ubuntu-containerd + when: ansible_distribution == 'Ubuntu' and runtime == 'containerd' + +- import_tasks: 01-ubuntu-os-crio.yml + tags: ubuntu-crio + when: ansible_distribution == 'Ubuntu' and runtime == 'crio' + +- import_tasks: 02-k8s-main.yml + 
tags: k8s-main diff --git a/ansible/01_old/roles/api_os_setting/templates/calico.yaml.j2 b/ansible/01_old/roles/api_os_setting/templates/calico.yaml.j2 new file mode 100644 index 0000000..59cf309 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/templates/calico.yaml.j2 @@ -0,0 +1,4779 @@ +--- +# Source: calico/templates/calico-kube-controllers.yaml +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers +--- +# Source: calico/templates/calico-kube-controllers.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "0" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. 
+ properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + ignoredInterfaces: + description: IgnoredInterfaces indicates the network interfaces that + needs to be excluded when reading device routes. + items: + type: string + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. 
This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. 
If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. 
+ properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + reachableBy: + description: Add an exact, i.e. /32, static route toward peer IP in + order to prevent route flapping. ReachableBy contains the address + of the gateway which peer can be reached by. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. 
+ type: string + ttlSecurity: + description: TTLSecurity enables the generalized TTL security mechanism + (GTSM) which protects against spoofed packets by ignoring received + packets with a smaller than expected TTL value. The provided value + is the number of hops (edges) between the peers. + type: integer + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. 
+ type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. 
+ type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. 
+ type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. 
+ properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. 
+ type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. 
+ properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. 
This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all host interfaces + with BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled, Strict or Loose. [Default: + Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing interpreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfHostConntrackBypass: + description: 'BPFHostConntrackBypass Controls whether to bypass Linux + conntrack in BPF mode for workloads and services. [Default: true + - bypass Linux conntrack]' + type: boolean + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. 
+ type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfL3IfacePattern: + description: BPFL3IfacePattern is a regular expression that allows + to list tunnel devices like wireguard or vxlan (i.e., L3 devices) + in addition to BPFDataIfacePattern. That is, tunnel interfaces not + created by Calico, that Calico workload traffic flows over as well + as any interfaces that handle incoming traffic to nodeports and + services from outside the cluster. + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). 
+ type: integer + bpfMapSizeIfState: + description: BPFMapSizeIfState sets the size for ifstate map. The + ifstate map must be large enough to hold an entry for each device + (host + workloads) on a host. + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: BPFPolicyDebugEnabled when true, Felix records detailed + information about the BPF policy programs, which can be examined + with the calico-bpf command-line tool. 
+ type: boolean + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: "DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix's (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s] \n Deprecated: replaced + by the generic HealthTimeoutOverrides." + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. 
Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. 
+ properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override feature detection + based on auto-detected platform capabilities. Values are specified + in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" + or "false" will force the feature, empty or omitted values are auto-detected. + type: string + featureGates: + description: FeatureGates is used to enable or disable tech-preview + Calico features. Values are specified in a comma separated list + with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". + This is used to enable features that are not fully production ready. + type: string + floatingIPs: + description: FloatingIPs configures whether or not Felix will program + non-OpenStack floating IP addresses. 
(OpenStack-derived floating + IPs are always programmed, regardless of this setting.) + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + healthTimeoutOverrides: + description: HealthTimeoutOverrides allows the internal watchdog timeouts + of individual subcomponents to be overriden. This is useful for + working around "false positive" liveness timeouts that can occur + in particularly stressful workloads or if CPU is constrained. For + a list of active subcomponents, see Felix's logs. + items: + properties: + name: + type: string + timeout: + type: string + required: + - name + - timeout + type: object + type: array + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. 
For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is Auto. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. 
To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. 
Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. 
+ In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. 
[Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. 
Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeSyncDisabled: + description: RouteSyncDisabled will disable all operations performed + on the route table. Set to true to run in network-policy mode only. + type: boolean + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. 
[Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix + determines this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled + for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). + [Default: false]' + type: boolean + wireguardEnabledV6: + description: 'WireguardEnabledV6 controls whether Wireguard is enabled + for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the IPv4 Wireguard interface. 
[Default: wireguard.cali]' + type: string + wireguardInterfaceNameV6: + description: 'WireguardInterfaceNameV6 specifies the name to use for + the IPv6 Wireguard interface. [Default: wg-v6.cali]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by IPv4 Wireguard. [Default: 51820]' + type: integer + wireguardListeningPortV6: + description: 'WireguardListeningPortV6 controls the listening port + used by IPv6 Wireguard. [Default: 51821]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the IPv4 Wireguard + interface. See Configuring MTU [Default: 1440]' + type: integer + wireguardMTUV6: + description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard + interface. See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. 
[Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. 
Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. 
+ items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". 
+ \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. 
+ properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. 
\n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. 
\n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. 
\n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). 
+ type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... 
} -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. 
(If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + type: integer + # TODO: This nullable is manually added in. 
We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. 
+ items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. 
+ maximum: 2147483647 + minimum: 0 + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. 
+ type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When natOutgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). 
+ type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. 
[Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. 
[Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. 
[Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. 
[Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. 
+ type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. 
+ type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. 
\n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. 
\n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. 
\n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). 
+ type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! 
expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are watched to check for existence as part of IPAM controller. + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # The CNI plugin and calico/node need to be able to create a default + # IPAMConfiguration + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + - create + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. 
+ - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 
+ terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: docker.io/calico/cni:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: docker.io/calico/cni:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. 
+ - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: docker.io/calico/node:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: docker.io/calico/node:v3.25.0 + imagePullPolicy: IfNotPresent + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. 
+ - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" + # Enable or Disable VXLAN on the default IPv6 IP pool. + - name: CALICO_IPV6POOL_VXLAN + value: "Never" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. 
+ - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: docker.io/calico/kube-controllers:v3.25.0 + imagePullPolicy: IfNotPresent + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 diff --git a/ansible/01_old/roles/api_os_setting/templates/components.yaml.j2 b/ansible/01_old/roles/api_os_setting/templates/components.yaml.j2 new file mode 100644 index 0000000..787f274 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/templates/components.yaml.j2 @@ -0,0 +1,197 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: 
metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: 
tmp-dir
+      nodeSelector:
+        kubernetes.io/os: linux
+      priorityClassName: system-cluster-critical
+      serviceAccountName: metrics-server
+      volumes:
+      - emptyDir: {}
+        name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: v1beta1.metrics.k8s.io
+spec:
+  group: metrics.k8s.io
+  groupPriorityMinimum: 100
+  insecureSkipTLSVerify: true
+  service:
+    name: metrics-server
+    namespace: kube-system
+  version: v1beta1
+  versionPriority: 100
diff --git a/ansible/01_old/roles/api_os_setting/templates/config.toml.j2 b/ansible/01_old/roles/api_os_setting/templates/config.toml.j2
new file mode 100644
index 0000000..0217565
--- /dev/null
+++ b/ansible/01_old/roles/api_os_setting/templates/config.toml.j2
@@ -0,0 +1,5 @@
+# {{ ansible_managed }}
+
+{% from 'yaml2toml_macro.j2' import yaml2toml with context -%}
+
+{{ yaml2toml(containerd_config) }}
diff --git a/ansible/01_old/roles/api_os_setting/templates/daemon.json.j2 b/ansible/01_old/roles/api_os_setting/templates/daemon.json.j2
new file mode 100644
index 0000000..6c2b554
--- /dev/null
+++ b/ansible/01_old/roles/api_os_setting/templates/daemon.json.j2
@@ -0,0 +1,9 @@
+{
+  "exec-opts": ["native.cgroupdriver=systemd"],
+  "log-driver": "json-file",
+  "log-opts": {
+    "max-size": "100m"
+  },
+  "storage-driver": "overlay2",
+  "insecure-registries": ["10.10.31.243:5000"]
+}
diff --git a/ansible/01_old/roles/api_os_setting/templates/hosts.j2 b/ansible/01_old/roles/api_os_setting/templates/hosts.j2
new file mode 100644
index 0000000..18804b7
--- /dev/null
+++ b/ansible/01_old/roles/api_os_setting/templates/hosts.j2
@@ -0,0 +1,6 @@
+127.0.0.1 localhost
+::1 localhost
+
+{% for host in groups.all %}
+{{ hostvars[host].ansible_default_ipv4.address }} {{ hostvars[host].ansible_fqdn }} {{ hostvars[host].ansible_hostname }}
+{%endfor%}
diff --git a/ansible/01_old/roles/api_os_setting/templates/myregistry.conf.j2 
b/ansible/01_old/roles/api_os_setting/templates/myregistry.conf.j2 new file mode 100644 index 0000000..687d62d --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/templates/myregistry.conf.j2 @@ -0,0 +1,3 @@ +[[registry]] +location = "10.10.31.243:5000" +insecure = true \ No newline at end of file diff --git a/ansible/01_old/roles/api_os_setting/templates/yaml2toml_macro.j2 b/ansible/01_old/roles/api_os_setting/templates/yaml2toml_macro.j2 new file mode 100644 index 0000000..33f69d0 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/templates/yaml2toml_macro.j2 @@ -0,0 +1,58 @@ +{%- macro yaml2inline_toml(item, depth) -%} + {%- if item is string or item is number -%} + {#- First, process all primitive types. -#} + {{ item | to_json }} + {%- elif item is mapping -%} + {#- Second, process all mappings. -#} + {#- Note that inline mappings must not contain newlines (except inside contained lists). -#} + {{ "{" }} + {%- for key, value in item.items() | sort -%} + {{ " " + + (key | to_json) + + " = " + + yaml2inline_toml(value, depth) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ " }" }} + {%- else -%} + {#- Third, process all lists. -#} + {%- if item | length == 0 -%}{{ "[]" }}{%- else -%} + {{ "[" }} + {%- for entry in item -%} + {{ "\n" + + (" " * (depth + 1)) + + yaml2inline_toml(entry, depth + 1) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ "\n" + (" " * depth) + "]" }} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro yaml2toml(item, super_keys=[]) -%} + {%- for key, value in item.items() | sort -%} + {%- if value is not mapping -%} + {#- First, process all non-mappings. -#} + {{ (" " * (super_keys | length)) + + (key | to_json) + + " = " + + (yaml2inline_toml(value, super_keys | length)) + + "\n" + }} + {%- endif -%} + {%- endfor -%} + {%- for key, value in item.items() | sort -%} + {%- if value is mapping -%} + {#- Second, process all mappings. 
-#} + {{ "\n" + + (" " * (super_keys | length)) + + "[" + + ((super_keys+[key]) | map('to_json') | join(".")) + + "]\n" + + yaml2toml(value, super_keys+[key]) + }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} diff --git a/ansible/01_old/roles/api_os_setting/tests/inventory b/ansible/01_old/roles/api_os_setting/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/01_old/roles/api_os_setting/tests/test.yml b/ansible/01_old/roles/api_os_setting/tests/test.yml new file mode 100644 index 0000000..191e731 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - apache diff --git a/ansible/01_old/roles/api_os_setting/vars/main.yml b/ansible/01_old/roles/api_os_setting/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/ansible/01_old/roles/api_os_setting/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for apache diff --git a/ansible/01_old/roles/bastion/defaults/main.yml b/ansible/01_old/roles/bastion/defaults/main.yml new file mode 100755 index 0000000..3e2af89 --- /dev/null +++ b/ansible/01_old/roles/bastion/defaults/main.yml @@ -0,0 +1,48 @@ +# Password aging settings +os_auth_pw_max_age: 90 +os_auth_pw_min_age: 1 +os_auth_pw_warn_age: 7 +passhistory: 2 + +# Inactivity and Failed attempts lockout settings +fail_deny: 5 +fail_unlock: 0 +inactive_lock: 0 +shell_timeout: 300 + +# tally settings +onerr: 'fail' +deny: 5 +unlock_time: 300 + +# Password complexity settings +pwquality_minlen: 9 +pwquality_maxrepeat: 3 +pwquality_lcredit: -1 +pwquality_ucredit: -1 +pwquality_dcredit: -1 +pwquality_ocredit: -1 + +# SSH settings +sshrootlogin: 'yes' +sshmainport: 22 +ssh_service_name: sshd + +# Crictl setup +crictl_app: crictl +crictl_version: 1.25.0 +crictl_os: linux +crictl_arch: amd64 +crictl_dl_url: 
https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/{{ crictl_app }}-v{{ crictl_version }}-{{ crictl_os }}-{{ crictl_arch }}.tar.gz +crictl_bin_path: /usr/local/bin +crictl_file_owner: root +crictl_file_group: root + +# temp +username: root +password: saasadmin1234 + +# common user flag +common_user: False + +pause_time: 1 diff --git a/ansible/01_old/roles/bastion/files/login_banner b/ansible/01_old/roles/bastion/files/login_banner new file mode 100755 index 0000000..d294eeb --- /dev/null +++ b/ansible/01_old/roles/bastion/files/login_banner @@ -0,0 +1,20 @@ +#!/bin/sh +printf ''' + |-----------------------------------------------------------------| + | This system is for the use of authorized users only. | + | Individuals using this computer system without authority, or in | + | excess of their authority, are subject to having all of their | + | activities on this system monitored and recorded by system | + | personnel. | + | | + | In the course of monitoring individuals improperly using this | + | system, or in the course of system maintenance, the activities | + | of authorized users may also be monitored. | + | | + | Anyone using this system expressly consents to such monitoring | + | and is advised that if such monitoring reveals possible | + | evidence of criminal activity, system personnel may provide the | + | evidence of such monitoring to law enforcement officials. 
| + |-----------------------------------------------------------------| +''' + diff --git a/ansible/01_old/roles/bastion/handlers/main.yml b/ansible/01_old/roles/bastion/handlers/main.yml new file mode 100755 index 0000000..abab7ef --- /dev/null +++ b/ansible/01_old/roles/bastion/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart sshd + service: + name: "{{ ssh_service_name }}" + state: restarted + enabled: true diff --git a/ansible/01_old/roles/bastion/tasks/admin_set.yml b/ansible/01_old/roles/bastion/tasks/admin_set.yml new file mode 100755 index 0000000..3836c16 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/admin_set.yml @@ -0,0 +1,7 @@ +--- +- name: user change + user: + name: "{{ username }}" + password: "{{ password | password_hash('sha512') }}" + state: present + diff --git a/ansible/01_old/roles/bastion/tasks/banner.yml b/ansible/01_old/roles/bastion/tasks/banner.yml new file mode 100755 index 0000000..6a172c9 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/banner.yml @@ -0,0 +1,29 @@ +--- +- name: Create a tar.gz archive of a single file. 
+ archive: + path: /etc/update-motd.d/* + dest: /etc/update-motd.d/motd.tar.gz + format: gz + force_archive: true + +- name: remove a motd.d files + file: + path: /etc/update-motd.d/{{ item }} + state: absent + with_items: + - 10-help-text + - 85-fwupd + - 90-updates-available + - 91-release-upgrade + - 95-hwe-eol + - 98-fsck-at-reboot + - 50-motd-news + - 88-esm-announce + +- name: Create login banner + copy: + src: login_banner + dest: /etc/update-motd.d/00-header + owner: root + group: root + mode: 0755 diff --git a/ansible/01_old/roles/bastion/tasks/crictl.yml b/ansible/01_old/roles/bastion/tasks/crictl.yml new file mode 100755 index 0000000..125a878 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/crictl.yml @@ -0,0 +1,19 @@ +--- +- name: Downloading and extracting {{ crictl_app }} {{ crictl_version }} + unarchive: + src: "{{ crictl_dl_url }}" + dest: "{{ crictl_bin_path }}" + owner: "{{ crictl_file_owner }}" + group: "{{ crictl_file_group }}" + extra_opts: + - crictl + remote_src: yes + +- name: Crictl command crontab setting + ansible.builtin.cron: + name: crontab command + minute: "0" + hour: "3" + user: root + job: "/usr/local/bin/crictl rmi --prune" + diff --git a/ansible/01_old/roles/bastion/tasks/login_defs.yml b/ansible/01_old/roles/bastion/tasks/login_defs.yml new file mode 100755 index 0000000..f25702a --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/login_defs.yml @@ -0,0 +1,48 @@ +--- +- name: Set pass max days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MAX_DAYS.*$' + line: "PASS_MAX_DAYS\t{{os_auth_pw_max_age}}" + backrefs: yes + +- name: Set pass min days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MIN_DAYS.*$' + line: "PASS_MIN_DAYS\t{{os_auth_pw_min_age}}" + backrefs: yes + +- name: Set pass min length + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MIN_LEN.*$' + line: "PASS_MIN_LEN\t{{pwquality_minlen}}" + backrefs: yes + +- name: Set pass warn days + 
lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_WARN_AGE.*$' + line: "PASS_WARN_AGE\t{{os_auth_pw_warn_age}}" + backrefs: yes + +- name: Set password encryption to SHA512 + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^ENCRYPT_METHOD\s.*$' + line: "ENCRYPT_METHOD\tSHA512" + backrefs: yes + +- name: Disable MD5 crypt explicitly + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^MD5_CRYPT_ENAB.*$' + line: "MD5_CRYPT_ENAB NO" + backrefs: yes diff --git a/ansible/01_old/roles/bastion/tasks/main.yml b/ansible/01_old/roles/bastion/tasks/main.yml new file mode 100755 index 0000000..2e1dbe8 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- pause: + seconds: "{{ pause_time }}" + +- include: sshd_config.yml + tags: sshd_config + +- include: sudoers.yml + tags: sudoers + +- include: admin_set.yml + tags: admin_set diff --git a/ansible/01_old/roles/bastion/tasks/pam.yml b/ansible/01_old/roles/bastion/tasks/pam.yml new file mode 100755 index 0000000..ae1c637 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/pam.yml @@ -0,0 +1,50 @@ +--- +- name: Add pam_tally2.so + template: + src: common-auth.j2 + dest: /etc/pam.d/common-auth + owner: root + group: root + mode: 0644 + +- name: Create pwquality.conf password complexity configuration + block: + - apt: + name: libpam-pwquality + state: present + install_recommends: false + - template: + src: pwquality.conf.j2 + dest: /etc/security/pwquality.conf + owner: root + group: root + mode: 0644 + +- name: Add pam_tally2.so + block: + - lineinfile: + dest: /etc/pam.d/common-account + regexp: '^account\srequisite' + line: "account requisite pam_deny.so" + + - lineinfile: + dest: /etc/pam.d/common-account + regexp: '^account\srequired' + line: "account required pam_tally2.so" + +- name: password reuse is limited + lineinfile: + dest: /etc/pam.d/common-password + line: "password required pam_pwhistory.so remember=5" + +- name: password hashing 
algorithm is SHA-512 + lineinfile: + dest: /etc/pam.d/common-password + regexp: '^password\s+\[success' + line: "password [success=1 default=ignore] pam_unix.so sha512" + +- name: Shadow Password Suite Parameters + lineinfile: + dest: /etc/pam.d/common-password + regexp: '^password\s+\[success' + line: "password [success=1 default=ignore] pam_unix.so sha512" diff --git a/ansible/01_old/roles/bastion/tasks/profile.yml b/ansible/01_old/roles/bastion/tasks/profile.yml new file mode 100755 index 0000000..fb1b456 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/profile.yml @@ -0,0 +1,24 @@ +--- +- name: Set session timeout + lineinfile: + dest: /etc/profile + regexp: '^TMOUT=.*' + insertbefore: '^readonly TMOUT' + line: 'TMOUT={{shell_timeout}}' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" + +- name: Set TMOUT readonly + lineinfile: + dest: /etc/profile + regexp: '^readonly TMOUT' + insertafter: 'TMOUT={{shell_timeout}}' + line: 'readonly TMOUT' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" + +- name: Set export TMOUT + lineinfile: + dest: /etc/profile + regexp: '^export TMOUT.*' + insertafter: 'readonly TMOUT' + line: 'export TMOUT' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" diff --git a/ansible/01_old/roles/bastion/tasks/sshd_config.yml b/ansible/01_old/roles/bastion/tasks/sshd_config.yml new file mode 100755 index 0000000..6b9f7a3 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/sshd_config.yml @@ -0,0 +1,30 @@ +--- +- name: Configure ssh root login to {{sshrootlogin}} + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '^(#)?PermitRootLogin.*' + line: 'PermitRootLogin {{sshrootlogin}}' + insertbefore: '^Match.*' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: SSH Listen on Main Port + lineinfile: + dest: /etc/ssh/sshd_config + insertbefore: '^#*AddressFamily' + line: 'Port {{sshmainport}}' + state: present + owner: root + group: root + mode: 0640 + 
notify: restart sshd + +- name: "Setting sshd allow users" + template: + src: allow_users.j2 + dest: "/etc/ssh/sshd_config.d/allow_users.conf" + notify: restart sshd + diff --git a/ansible/01_old/roles/bastion/tasks/sudoers.yml b/ansible/01_old/roles/bastion/tasks/sudoers.yml new file mode 100755 index 0000000..b01dfd4 --- /dev/null +++ b/ansible/01_old/roles/bastion/tasks/sudoers.yml @@ -0,0 +1,105 @@ +--- +- name: Get all ssh sessions + shell: ps -ef | grep sshd | grep -v root | grep -v "{{ ansible_user }}" | awk '{print $2}' + register: ssh_sessions + ignore_errors: true + +- name: Terminate ssh sessions + shell: kill -9 {{ item }} + with_items: "{{ ssh_sessions.stdout_lines }}" + when: ssh_sessions is defined + ignore_errors: true + +- name: "Create devops group" + ansible.builtin.group: + name: "devops" + state: present + +- name: "get current users" + shell: "cat /etc/passwd | egrep -iv '(false|nologin|sync|root|dev2-iac)' | awk -F: '{print $1}'" + register: deleting_users + +- name: "Delete users" + ansible.builtin.user: + name: "{{ item }}" + state: absent + remove: yes + with_items: "{{ deleting_users.stdout_lines }}" + when: item != ansible_user + ignore_errors: true + + +- name: "Create admin user" + ansible.builtin.user: + name: "{{ item.name }}" + group: "devops" + shell: "/bin/bash" + system: yes + state: present + with_items: "{{ admin_users }}" + when: + - item.name is defined + ignore_errors: true + +- name: "admin user password change" + user: + name: "{{ item.name }}" + password: "{{ password | password_hash('sha512') }}" + state: present + with_items: "{{ admin_users }}" + when: + - item.name is defined + ignore_errors: true + +- name: "Add admin user key" + authorized_key: + user: "{{ item.name }}" + state: present + key: "{{ item.key }}" + with_items: "{{ admin_users }}" + when: + - item.name is defined + - item.key is defined + ignore_errors: true + +- name: "Create common user" + ansible.builtin.user: + name: "{{ item.name }}" + group: 
"users" + shell: "/bin/bash" + system: yes + state: present + with_items: "{{ allow_users }}" + when: + - item.name is defined + - common_user == True + ignore_errors: true + +- name: "Change common user password change" + user: + name: "{{ item.name }}" + password: "{{ password | password_hash('sha512') }}" + state: present + with_items: "{{ allow_users }}" + when: + - item.name is defined + - common_user == True + ignore_errors: true + +- name: "Add common user key" + authorized_key: + user: "{{ item.name }}" + state: present + key: "{{ item.key }}" + with_items: "{{ allow_users }}" + when: + - item.name is defined + - item.key is defined + - common_user == True + ignore_errors: true + +- name: "Setting sudoers allow users" + template: + src: sudoers_users.j2 + dest: "/etc/sudoers.d/sudoers_users" + ignore_errors: true diff --git a/ansible/01_old/roles/bastion/templates/allow_users.j2 b/ansible/01_old/roles/bastion/templates/allow_users.j2 new file mode 100755 index 0000000..fab55dc --- /dev/null +++ b/ansible/01_old/roles/bastion/templates/allow_users.j2 @@ -0,0 +1,11 @@ +AllowUsers dev2-iac@10.10.43.* +{% if admin_users is defined %} +{% for user in admin_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} +{% if allow_users is defined %} +{% for user in allow_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} diff --git a/ansible/01_old/roles/bastion/templates/common-auth.j2 b/ansible/01_old/roles/bastion/templates/common-auth.j2 new file mode 100755 index 0000000..64a603b --- /dev/null +++ b/ansible/01_old/roles/bastion/templates/common-auth.j2 @@ -0,0 +1,27 @@ +# +# /etc/pam.d/common-auth - authentication settings common to all services +# +# This file is included from other service-specific PAM config files, +# and should contain a list of the authentication modules that define +# the central authentication scheme for use on the system +# (e.g., /etc/shadow, LDAP, Kerberos, etc.). 
The default is to use the +# traditional Unix authentication mechanisms. +# +# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. +# To take advantage of this, it is recommended that you configure any +# local modules either before or after the default block, and use +# pam-auth-update to manage selection of other modules. See +# pam-auth-update(8) for details. +auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}} + +# here are the per-package modules (the "Primary" block) +auth [success=1 default=ignore] pam_unix.so nullok +# here's the fallback if no module succeeds +auth requisite pam_deny.so +# prime the stack with a positive return value if there isn't one already; +# this avoids us returning an error just because nothing sets a success code +auth required pam_permit.so +# since the modules above will each just jump around +# and here are more per-package modules (the "Additional" block) +auth optional pam_cap.so +# end of pam-auth-update config diff --git a/ansible/01_old/roles/bastion/templates/pwquality.conf.j2 b/ansible/01_old/roles/bastion/templates/pwquality.conf.j2 new file mode 100755 index 0000000..3ec2cbe --- /dev/null +++ b/ansible/01_old/roles/bastion/templates/pwquality.conf.j2 @@ -0,0 +1,50 @@ +# Configuration for systemwide password quality limits +# Defaults: +# +# Number of characters in the new password that must not be present in the +# old password. +# difok = 5 +# +# Minimum acceptable size for the new password (plus one if +# credits are not disabled which is the default). (See pam_cracklib manual.) +# Cannot be set to lower value than 6. +minlen = {{pwquality_minlen}} +# +# The maximum credit for having digits in the new password. If less than 0 +# it is the minimum number of digits in the new password. +dcredit = {{pwquality_dcredit}} +# +# The maximum credit for having uppercase characters in the new password. 
+# If less than 0 it is the minimum number of uppercase characters in the new +# password. +ucredit = {{pwquality_ucredit}} +# +# The maximum credit for having lowercase characters in the new password. +# If less than 0 it is the minimum number of lowercase characters in the new +# password. +lcredit = {{pwquality_lcredit}} +# +# The maximum credit for having other characters in the new password. +# If less than 0 it is the minimum number of other characters in the new +# password. +ocredit = {{pwquality_ocredit}} +# +# The minimum number of required classes of characters for the new +# password (digits, uppercase, lowercase, others). +# minclass = 0 +# +# The maximum number of allowed consecutive same characters in the new password. +# The check is disabled if the value is 0. +maxrepeat = {{pwquality_maxrepeat}} +# +# The maximum number of allowed consecutive characters of the same class in the +# new password. +# The check is disabled if the value is 0. +# maxclassrepeat = 0 +# +# Whether to check for the words from the passwd entry GECOS string of the user. +# The check is enabled if the value is not 0. +# gecoscheck = 0 +# +# Path to the cracklib dictionaries. Default is to use the cracklib default. 
+# dictpath = diff --git a/ansible/01_old/roles/bastion/templates/sudoers_users.j2 b/ansible/01_old/roles/bastion/templates/sudoers_users.j2 new file mode 100755 index 0000000..4c30d95 --- /dev/null +++ b/ansible/01_old/roles/bastion/templates/sudoers_users.j2 @@ -0,0 +1,6 @@ +dev2-iac ALL=(ALL) NOPASSWD: ALL +{% if allow_users is defined %} +{% for user in admin_users %} +{{ user.name }} ALL=(ALL) NOPASSWD: ALL +{% endfor %} +{% endif %} diff --git a/ansible/01_old/roles/cmoa_demo_install/defaults/main.yml b/ansible/01_old/roles/cmoa_demo_install/defaults/main.yml new file mode 100644 index 0000000..11b9651 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/defaults/main.yml @@ -0,0 +1,64 @@ +# helm file install +helm_checksum: sha256:950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 +helm_version: v3.10.3 + +# cmoa info +cmoa_namespace: imxc +cmoa_version: rel3.4.8 + +# default ip/version (not change) +before_ip: 111.111.111.111 +before_version: rel0.0.0 + +# files/00-default in role +docker_secret_file: secret_nexus.yaml + +# all, jaeger, jspd +imxc_ui: all + +# [docker_config_path] +docker_config_nexus: dockerconfig/docker_config_nexus.json + +# [jaeger] +jaeger_servicename: imxc-ui-service-jaeger +jaeger_service_port: 80 +jaeger_nodePort: 31080 # only imxc-ui-jaeger option (imxc-ui-jaeger template default port=31084) + +# [minio] +minio_service_name: minio +minio_service_port: 9000 +minio_nodePort: 32002 +minio_user: cloudmoa +minio_pass: admin1234 +bucket_name: cortex-bucket +days: 42 +rule_id: cloudmoa + +# [Elasticsearch] +elasticsearch_service_name: elasticsearch +elasticsearch_service_port: 9200 +elasticsearch_nodePort: 30200 + +# [Keycloak] +# Keycloak configuration settings +keycloak_http_port: 31082 +keycloak_https_port: 8443 +keycloak_management_http_port: 31990 +keycloak_realm: exem + +# Keycloak administration console user +keycloak_admin_user: admin +keycloak_admin_password: admin +keycloak_auth_realm: master 
+keycloak_auth_client: admin-cli +keycloak_context: /auth + +# keycloak_clients +keycloak_clients: + - name: 'authorization_server' + client_id: authorization_server + realm: exem + redirect_uris: "http://10.10.30.75:31080/*,http://10.10.30.75:31084/*,http://localhost:8080/*,http://localhost:8081/*" + public_client: True + + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/00-default/sa_patch.sh b/ansible/01_old/roles/cmoa_demo_install/files/00-default/sa_patch.sh new file mode 100755 index 0000000..618a35b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/00-default/sa_patch.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export KUBECONFIG=$1 + +kubectl wait node --for=condition=ready --all --timeout=60s + +#kubectl -n imxc patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' +kubectl -n default patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' diff --git a/ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml b/ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml new file mode 100644 index 0000000..268027b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: regcred +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CiAgICAgICJhdXRoIjogIlpYaGxiV1JsZGpJNk0yWXlObVV6T0RjdFlqY3paQzAwTkRVMUxUazNaRFV0T1dWaU9EWmtObVl4WXpOayIKICAgIH0KICB9Cn0KCg== +type: kubernetes.io/dockerconfigjson diff --git a/ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml b/ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml new file mode 100644 index 0000000..6a2543f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + .dockerconfigjson: 
ewogICJhdXRocyI6IHsKICAgICIxMC4xMC4zMS4yNDM6NTAwMCI6IHsKICAgICAgImF1dGgiOiAiWTI5eVpUcGpiM0psWVdSdGFXNHhNak0wIgogICAgfQogIH0KfQoK +kind: Secret +metadata: + name: regcred +type: kubernetes.io/dockerconfigjson + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml new file mode 100644 index 0000000..8f41292 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exem-local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml new file mode 100644 index 0000000..1bd4546 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-0 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-1 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv2 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-2 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + 
accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv3 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-3 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv4 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/cmoa_minio b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/cmoa_minio new file mode 100755 index 0000000..522b87d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/cmoa_minio @@ -0,0 +1,63 @@ +#! /usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, time, urllib3 +from minio import Minio +from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition +from minio.commonconfig import ENABLED, Filter + +def minio_conn(ipaddr, portnum, ac_key, sec_key): + conn='{}:{}'.format(ipaddr,portnum) + url='http://{}'.format(conn) + print(url) + minio_client = Minio( + conn, access_key=ac_key, secret_key=sec_key, secure=False, + http_client=urllib3.ProxyManager( + url, timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + retries=urllib3.Retry( + total=5, backoff_factor=0.2, + status_forcelist=[ + 500, 502, 503, 504 + ], + ), + ), + ) + + return minio_client + +def minio_create_buckets(minio_client, bucket_name, days, rule_id="cloudmoa"): + config = LifecycleConfig( + [ + Rule( + ENABLED, + rule_filter=Filter(prefix=""), + rule_id=rule_id, + expiration=Expiration(days=days), + ), + ], + ) + minio_client.set_bucket_lifecycle(bucket_name, config) + +def minio_delete_bucket(client, bucket_name): + 
client.delete_bucket_lifecycle(bucket_name) + +def main(): + s3_url = os.sys.argv[1].split(':')[0] + s3_url_port = os.sys.argv[1].split(':')[1] + minio_user = os.sys.argv[2] + minio_pass = os.sys.argv[3] + bucket_name = os.sys.argv[4] + minio_days = os.sys.argv[5] + rule_id = os.sys.argv[6] + + print(s3_url, s3_url_port, minio_user, minio_pass) + + minio_client=minio_conn(s3_url, s3_url_port, minio_user, minio_pass) + minio_create_buckets(minio_client, bucket_name, minio_days, rule_id) + +if __name__ == "__main__": + try: + main() + except Exception as err: + print("[Usage] minio {url:port} {username} {password} {bucketName} {days} {ruleId}") + print(err) \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/.helmignore new file mode 100644 index 0000000..a9fe727 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml new file mode 100644 index 0000000..fc21076 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +description: Multi-Cloud Object Storage +name: minio +version: 4.0.2 +appVersion: RELEASE.2022-05-08T23-50-31Z +keywords: + - minio + - storage + - object-storage + - s3 + - cluster +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +sources: +- https://github.com/minio/minio +maintainers: +- name: MinIO, Inc + email: dev@minio.io diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/README.md b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/README.md new file mode 100644 index 0000000..ad3eb7d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/README.md @@ -0,0 +1,235 @@ +# MinIO Helm Chart + +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) + +MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. + +For more detailed documentation please visit [here](https://docs.minio.io/) + +## Introduction + +This chart bootstraps MinIO Cluster on [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. 
+ +## Prerequisites + +- Helm cli with Kubernetes cluster configured. +- PV provisioner support in the underlying infrastructure. (We recommend using ) +- Use Kubernetes version v1.19 and later for best experience. + +## Configure MinIO Helm repo + +```bash +helm repo add minio https://charts.min.io/ +``` + +### Installing the Chart + +Install this chart using: + +```bash +helm install --namespace minio --set rootUser=rootuser,rootPassword=rootpass123 --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Upgrading the Chart + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +### Configuration + +Refer the [Values file](./values.yaml) for all the possible config fields. + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. For example, + +```bash +helm install --name my-release -f values.yaml minio/minio +``` + +### Persistence + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. 
You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +### Existing PersistentVolumeClaim + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +### NetworkPolicy + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for *all* pods in the namespace: + +``` +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. + +### Existing secret + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. 
+ +First, create the secret: + +```bash +kubectl create secret generic my-minio-secret --from-literal=rootUser=foobarbaz --from-literal=rootPassword=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: + +```bash +helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data.\ in Secret | Corresponding variable | Description | Required | +|:------------------------|:-----------------------|:---------------|:---------| +| `rootUser` | `rootUser` | Root user. | yes | +| `rootPassword` | `rootPassword` | Root password. | yes | + +All corresponding variables will be ignored in values file. + +### Configure TLS + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). Then create a secret using + +```bash +kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. 
If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include MinIO's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for MinIO's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +### Create buckets after install + +Install the chart, specifying the buckets you want to create after install: + +```bash +helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + +### Create policies after install +Install the chart, specifying the policies you want to create after install: + +```bash +helm install --set policies[0].name=mypolicy,policies[0].statements[0].resources[0]='arn:aws:s3:::bucket1',policies[0].statements[0].actions[0]='s3:ListBucket',policies[0].statements[0].actions[1]='s3:GetObject' minio/minio +``` + +Description of the configuration parameters used above - + +- `policies[].name` - name of the policy to create, must be a string with length > 0 +- `policies[].statements[]` - list of statements, includes actions and resources +- 
`policies[].statements[].resources[]` - list of resources that applies the statement +- `policies[].statements[].actions[]` - list of actions granted + +### Create user after install + +Install the chart, specifying the users you want to create after install: + +```bash +helm install --set users[0].accessKey=accessKey,users[0].secretKey=secretKey,users[0].policy=none,users[1].accessKey=accessKey2,users[1].secretRef=existingSecret,users[1].secretKey=password,users[1].policy=none minio/minio +``` + +Description of the configuration parameters used above - + +- `users[].accessKey` - accessKey of user +- `users[].secretKey` - secretKey of user +- `users[].existingSecret` - secret name that contains the secretKey of user +- `users[].existingSecretKey` - data key in existingSecret secret containing the secretKey +- `users[].policy` - name of the policy to assign to user + +## Uninstalling the Chart + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +helm delete my-release +``` + +or + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt new file mode 100644 index 0000000..9337196 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt @@ -0,0 +1,43 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access MinIO from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. 
kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . }}-local + +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +MinIO can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . 
}} + +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true +will be able to connect to this minio cluster. +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..35a48fc --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,109 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? 
; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.buckets }} +{{ $global := . 
}} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ tpl .name $global }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt new file mode 100644 index 0000000..d565b16 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkPolicyExists ($policy) +# Check if the policy exists, by using the exit code of `mc admin policy info` +checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? 
+} + +# createPolicy($name, $filename) +createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.policies }} +# Create the policies +{{- range $idx, $policy := .Values.policies }} +createPolicy {{ $policy.name }} policy_{{ $idx }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt new file mode 100644 index 0000000..7771428 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt @@ -0,0 +1,88 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? 
; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkUserExists ($username) +# Check if the user exists, by using the exit code of `mc admin user info` +checkUserExists() { + USER=$1 + CMD=$(${MC} admin user info myminio $USER > /dev/null 2>&1) + return $? +} + +# createUser ($username, $password, $policy) +createUser() { + USER=$1 + PASS=$2 + POLICY=$3 + + # Create the user if it does not exist + if ! checkUserExists $USER ; then + echo "Creating user '$USER'" + ${MC} admin user add myminio $USER $PASS + else + echo "User '$USER' already exists." + fi + + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.users }} +{{ $global := . 
}} +# Create the users +{{- range .Values.users }} +{{- if .existingSecret }} +createUser {{ tpl .accessKey $global }} $(cat /config/secrets/{{ tpl .accessKey $global }}) {{ .policy }} +{{ else }} +createUser {{ tpl .accessKey $global }} {{ .secretKey }} {{ .policy }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt new file mode 100644 index 0000000..b583a77 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt @@ -0,0 +1,58 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# runCommand ($@) +# Run custom mc command +runCommand() { + ${MC} "$@" + return $? 
+} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.customCommands }} +# Run custom commands +{{- range .Values.customCommands }} +runCommand {{ .command }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl new file mode 100644 index 0000000..83a2e15 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl @@ -0,0 +1,18 @@ +{{- $statements_length := len .statements -}} +{{- $statements_length := sub $statements_length 1 -}} +{ + "Version": "2012-10-17", + "Statement": [ +{{- range $i, $statement := .statements }} + { + "Effect": "Allow", + "Action": [ +"{{ $statement.actions | join "\",\n\"" }}" + ]{{ if $statement.resources }}, + "Resource": [ +"{{ $statement.resources | join "\",\n\"" }}" + ]{{ end }} + }{{ if lt $i $statements_length }},{{end }} +{{- end }} + ] +} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl new file mode 100644 index 0000000..4e38194 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl @@ -0,0 +1,218 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare ">=1.7-0, <1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for console ingress. +*/}} +{{- define "minio.consoleIngress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to MinIO binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "minio.getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "minio.root.username" -}} + {{- if .Values.rootUser }} + {{- .Values.rootUser | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 20 "Key" "rootUser") }} + {{- end }} +{{- end -}} + +{{- define "minio.root.password" -}} + {{- if .Values.rootPassword }} + {{- .Values.rootPassword | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 40 "Key" "rootPassword") }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml new file mode 100644 index 0000000..95a7c60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + add-user: |- +{{ include (print $.Template.BasePath "/_helper_create_user.txt") . | indent 4 }} + add-policy: |- +{{ include (print $.Template.BasePath "/_helper_create_policy.txt") . 
| indent 4 }} +{{- range $idx, $policy := .Values.policies }} + # {{ $policy.name }} + policy_{{ $idx }}.json: |- +{{ include (print $.Template.BasePath "/_helper_policy.tpl") . | indent 4 }} +{{ end }} + custom-command: |- +{{ include (print $.Template.BasePath "/_helper_custom_command.txt") . | indent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml new file mode 100644 index 0000000..2ce9a93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.consoleIngress.enabled -}} +{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}} +{{- $servicePort := .Values.consoleService.port -}} +{{- $ingressPath := .Values.consoleIngress.path -}} +apiVersion: {{ template "minio.consoleIngress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.consoleIngress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.consoleIngress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.consoleIngress.ingressClassName }} + ingressClassName: {{ .Values.consoleIngress.ingressClassName }} +{{- end }} +{{- if .Values.consoleIngress.tls }} + tls: + {{- range .Values.consoleIngress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.consoleIngress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml new file mode 100644 index 0000000..f4b1294 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml @@ -0,0 +1,48 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-console + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.consoleService.annotations }} + annotations: +{{ toYaml .Values.consoleService.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} + type: ClusterIP + {{- if not (empty .Values.consoleService.clusterIP) }} + clusterIP: {{ .Values.consoleService.clusterIP }} + {{end}} +{{- else if eq .Values.consoleService.type "LoadBalancer" }} + type: {{ .Values.consoleService.type }} + loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} +{{- else }} + type: {{ .Values.consoleService.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.consoleService.port }} + protocol: TCP +{{- if (and (eq .Values.consoleService.type "NodePort") ( .Values.consoleService.nodePort)) }} + nodePort: {{ .Values.consoleService.nodePort }} +{{- else }} + targetPort: {{ .Values.consoleService.port }} +{{- end}} +{{- if .Values.consoleService.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.consoleService.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml new file mode 100644 index 0000000..a06bc35 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: 1 + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }}" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml new file mode 100644 index 0000000..b14f86b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml @@ -0,0 +1,173 @@ +{{- if eq .Values.mode "gateway" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + {{- if eq .Values.gateway.type "nas" }} + - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }} " + {{- end }} + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client.crt" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client.key" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml new file mode 100644 index 0000000..8d9a837 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..68a2599 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + - port: {{ .Values.consoleService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..8037eb7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: minio + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" . 
}} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..434b31d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeBucketJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml new file mode 100644 index 0000000..ae78769 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.policies }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-policies-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-policies-job + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makePolicyJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.podAnnotations }} + annotations: +{{ toYaml .Values.makePolicyJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makePolicyJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makePolicyJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-policy"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makePolicyJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml new file mode 100644 index 0000000..d3750e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml @@ -0,0 +1,97 @@ +{{- $global := . -}} +{{- if .Values.users }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-user-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-user-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeUserJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeUserJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeUserJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeUserJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- range .Values.users }} + {{- if .existingSecret }} + - secret: + name: {{ tpl .existingSecret $global }} + items: + - key: {{ .existingSecretKey }} + path: secrets/{{ tpl .accessKey $global }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeUserJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml new file mode 100644 index 0000000..7e83faf --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml @@ -0,0 +1,87 @@ +{{- if .Values.customCommands }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-custom-command-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-custom-command-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.customCommandJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.podAnnotations }} + annotations: +{{ toYaml .Values.customCommandJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.customCommandJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.customCommandJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/custom-command"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.customCommandJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml new file mode 100644 index 0000000..369aade --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml @@ -0,0 +1,35 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml new file mode 100644 index 0000000..da2ecab --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + rootUser: {{ include "minio.root.username" . | b64enc | quote }} + rootPassword: {{ include "minio.root.password" . 
| b64enc | quote }} + {{- if .Values.etcd.clientCert }} + etcd_client.crt: {{ .Values.etcd.clientCert | toString | b64enc | quote }} + {{- end }} + {{- if .Values.etcd.clientCertKey }} + etcd_client.key: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 0000000..4bac7e3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml new file mode 100644 index 0000000..64aa990 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml @@ -0,0 +1,49 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + monitoring: "true" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..6a4bd94 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..809848f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{ else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- if .Values.tls.enabled }} + - port: https + scheme: https + {{ else }} + - port: http + scheme: http + {{- end }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + {{- if not .Values.metrics.serviceMonitor.public }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-prometheus + key: token + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} + monitoring: "true" +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4160f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml @@ -0,0 +1,217 @@ +{{- if eq .Values.mode "distributed" }} +{{ $poolCount := .Values.pools | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $poolCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/values.yaml new file mode 100644 index 0000000..a957f7f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/01-storage/minio/values.yaml @@ -0,0 +1,461 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: 10.10.31.243:5000/cmoa3/minio + tag: RELEASE.2022-05-08T23-50-31Z + pullPolicy: IfNotPresent + +imagePullSecrets: + - name: "regcred" +# - name: "image-pull-secret" + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). 
+## +mcImage: + repository: 10.10.31.243:5000/cmoa3/mc + tag: RELEASE.2022-05-09T04-08-26Z + pullPolicy: IfNotPresent + +## minio mode, i.e. standalone or distributed or gateway. +mode: distributed ## other supported values are "standalone", "gateway" + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Typically the deployment/statefulset includes checksums of secrets/config, +## So that when these change on a subsequent helm install, the deployment/statefulset +## is restarted. This can result in unnecessary restarts under GitOps tooling such as +## flux, so set to "true" to disable this behaviour. +ignoreChartChecksums: false + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Port number for MinIO S3 API Access +minioAPIPort: "9000" + +## Port number for MinIO Browser COnsole Access +minioConsolePort: "9001" + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default rootUser, rootPassword +## AccessKey and secretKey is generated when not set +## Distributed MinIO ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +rootUser: "admin" +rootPassword: "passW0rd" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | rootUser | rootUser | +## | rootPassword | rootPassword | +## +## All mentioned variables will be ignored in values file. +## .data.rootUser and .data.rootPassword are mandatory, +## others depend on enabled status of corresponding sections. 
+existingSecret: "" + +## Directory on the MinIO pof +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" + +## Path where PV would be mounted on the MinIO Pod +mountPath: "/export" +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +## +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 2 +# Number of MinIO containers running +#replicas: 16 +replicas: 2 +# Number of expanded MinIO clusters +pools: 1 + +# Deploy if 'mode == gateway' - 4 replicas. +gateway: + type: "nas" # currently only "nas" are supported. + replicas: 4 + +## TLS Settings for MinIO +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for MinIO. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. 
+trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: {} + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "exem-local-storage" + VolumeName: "" + accessMode: ReadWriteOnce + size: 50Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +#service: +# type: NodePort +# clusterIP: ~ + ## Make sure to match it to minioAPIPort +# port: "9000" +# nodePort: "32002" + +service: + type: ClusterIP + clusterIP: ~ + ## Make sure to match it to minioAPIPort + port: "9000" + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +ingress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +consoleService: + type: NodePort + clusterIP: ~ + ## Make sure to match it to minioConsolePort + port: "9001" + nodePort: "32001" + +consoleIngress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - console.minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if 
enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" + +# Additational pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + #memory: 16Gi + memory: 1Gi + cpu: 200m + +## List of policies to be created after minio install +## +## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] +## you can define additional policies with custom supported actions and resources +policies: [] +## writeexamplepolicy policy grants creation or deletion of buckets with name +## starting with example. In addition, grants objects write permissions on buckets starting with +## example. +# - name: writeexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:AbortMultipartUpload" +# - "s3:GetObject" +# - "s3:DeleteObject" +# - "s3:PutObject" +# - "s3:ListMultipartUploadParts" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:CreateBucket" +# - "s3:DeleteBucket" +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## readonlyexamplepolicy policy grants access to buckets with name starting with example. +## In addition, grants objects read permissions on buckets starting with example. 
+# - name: readonlyexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:GetObject" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## Additional Annotations for the Kubernetes Job makePolicyJob +makePolicyJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of users to be created after minio install +## +users: + ## Username, password and policy to be assigned to the user + ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] + ## Add new policies as explained here https://docs.min.io/docs/minio-multi-user-quickstart-guide.html + ## NOTE: this will fail if LDAP is enabled in your MinIO deployment + ## make sure to disable this if you are using LDAP. 
+ - accessKey: cloudmoa + secretKey: admin1234 + policy: consoleAdmin + # Or you can refer to specific secret + #- accessKey: externalSecret + # existingSecret: my-secret + # existingSecretKey: password + # policy: readonly + + +## Additional Annotations for the Kubernetes Job makeUserJob +makeUserJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of buckets to be created after minio install +## +buckets: + - name: cortex-bucket + policy: none + purge: false + versioning: false + + # # Name of the bucket + # - name: bucket1 + # # Policy to be set on the + # # bucket [none|download|upload|public] + # policy: none + # # Purge if bucket exists already + # purge: false + # # set versioning for + # # bucket [true|false] + # versioning: false + # - name: bucket2 + # policy: none + # purge: false + # versioning: true + +## Additional Annotations for the Kubernetes Job makeBucketJob +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of command to run after minio install +## NOTE: the mc command TARGET is always "myminio" +customCommands: + # - command: "admin policy set myminio consoleAdmin group='cn=ops,cn=groups,dc=example,dc=com'" + +## Additional Annotations for the Kubernetes Job customCommandJob +customCommandJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) +## when Chart is deployed +environment: + ## Please refer for comprehensive list https://docs.min.io/minio/baremetal/reference/minio-server/minio-server.html + ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" + ## MINIO_BROWSER: "off" + +## The name of a secret in the same kubernetes namespace which contain secret values +## This can be useful for LDAP password, etc +## The key in the secret must be 'config.env' +## +# extraSecret: minio-extraenv + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. + name: "minio-sa" + +metrics: + serviceMonitor: + enabled: false + public: true + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? -ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. 
cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# 
Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + 
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the 
two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/01-coredns.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/01-coredns.yaml new file mode 100644 index 0000000..c1cb74b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/01-coredns.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-dns + kubernetes.io/name: coredns + name: coredns + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + 
port: 53 + protocol: TCP + targetPort: 53 + - name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: ClusterIP + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/Chart.yaml new file mode 100644 index 0000000..74d1d30 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: base +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml new file mode 100644 index 0000000..74b9505 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: analysis +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml new file mode 100644 index 0000000..21a9298 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml @@ -0,0 +1,87 @@ +#docker run -d --hostname my-rabbit --name some-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management + +--- +kind: Service +apiVersion: v1 +metadata: + name: metric-analyzer-master + namespace: imxc +spec: +# clusterIP: None # We need a headless service to allow the pods to discover each + ports: # other during autodiscover phase for cluster creation. + - name: http # A ClusterIP will prevent resolving dns requests for other pods + protocol: TCP # under the same service. 
+ port: 15672 + targetPort: 15672 +# nodePort: 30001 + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 +# nodePort: 30002 + selector: + app: metric-analyzer-master +# type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-master + name: metric-analyzer-master + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: metric-analyzer-master + template: + metadata: + labels: + app: metric-analyzer-master + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer:{{ .Values.global.METRIC_ANALYZER_MASTER_VERSION }} + imagePullPolicy: IfNotPresent + name: master +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: POSTGRES_SERVER + value: postgres + - name: POSTGRES_USER + value: admin + - name: POSTGRES_PW + value: eorbahrhkswp + - name: POSTGRES_DB + value: postgresdb + - name: PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: POSTGRES_PORT + value: "5432" + - name: ES_SERVER + value: elasticsearch + - name: ES_PORT + value: "9200" + - name: ES_ID + value: "elastic" + - name: ES_PWD + value: "elastic" + - name: LOG_LEVEL + value: INFO + - name: AI_TYPE + value: BASELINE + - name: BASELINE_SIZE + value: "3" + - name: CHECK_DAY + value: "2" + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml new file mode 100644 index 0000000..7e6eaea --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + labels: + app: metric-analyzer-worker + name: metric-analyzer-worker + namespace: imxc +spec: + replicas: 10 + selector: + matchLabels: + app: metric-analyzer-worker + template: + metadata: + labels: + app: metric-analyzer-worker + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer_worker:{{ .Values.global.METRIC_ANALYZER_WORKER_VERSION }} + imagePullPolicy: IfNotPresent + name: worker +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" +# volumes: +# - hostPath: +# path: /usr/share/zoneinfo/Asia/Seoul +# name: timezone-config + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml new file mode 100644 index 0000000..d764210 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml @@ -0,0 +1,68 @@ +# Default values for analysis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore new file mode 100644 index 0000000..db3418b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore @@ -0,0 +1,29 @@ +# Git +.git/ +.gitignore +.github/ + +# IDE +.project +.idea/ +*.tmproj + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Cortex ignore +docs/ +tools/ +ct.yaml +ci/ +README.md.gotmpl +.prettierignore +CHANGELOG.md +MAINTAINERS.md +LICENSE +Makefile +renovate.json + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock new file mode 100644 index 0000000..f909218 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock @@ -0,0 +1,24 @@ +dependencies: +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +digest: sha256:a6b7c1239f9cabc85dd647798a6f92ae8a9486756ab1e87fc11af2180ab03ee4 +generated: "2021-12-25T19:21:57.666697218Z" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml new file mode 100644 index 0000000..9122fe6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml @@ -0,0 +1,56 @@ +apiVersion: v2 +appVersion: v1.11.0 +dependencies: +- alias: memcached + condition: memcached.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-read + condition: memcached-index-read.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-write + condition: memcached-index-write.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-frontend + condition: memcached-frontend.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-blocks-index + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + 
version: 5.15.12 +- alias: memcached-blocks + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks-metadata + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +description: Horizontally scalable, highly available, multi-tenant, long term Prometheus. +home: https://cortexmetrics.io/ +icon: https://avatars2.githubusercontent.com/u/43045022?s=200&v=4 +kubeVersion: ^1.19.0-0 +maintainers: +- email: thayward@infoblox.com + name: Tom Hayward + url: https://github.com/kd7lxl +- email: Niclas.Schad@plusserver.com + name: Niclas Schad + url: https://github.com/ShuzZzle +name: cortex +sources: +- https://github.com/cortexproject/cortex-helm-chart +version: 1.2.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md new file mode 100644 index 0000000..9a793d3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md @@ -0,0 +1,754 @@ + + +# cortex + +![Version: 1.2.0](https://img.shields.io/badge/Version-1.2.0-informational?style=flat-square) ![AppVersion: v1.11.0](https://img.shields.io/badge/AppVersion-v1.11.0-informational?style=flat-square) + +Horizontally scalable, highly available, multi-tenant, long term Prometheus. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Tom Hayward | thayward@infoblox.com | https://github.com/kd7lxl | +| Niclas Schad | Niclas.Schad@plusserver.com | https://github.com/ShuzZzle | + +## Documentation + +Checkout our documentation for the cortex-helm-chart [here](https://cortexproject.github.io/cortex-helm-chart/) + +## Dependencies + +### Key-Value store + +Cortex requires a Key-Value (KV) store to store the ring. 
It can use traditional KV stores like [Consul](https://www.consul.io/) or [etcd](https://etcd.io/), but it can also build its own KV store on top of memberlist library using a gossip algorithm. + +The recommended approach is to use the built-in memberlist as a KV store, where supported. + +External KV stores can be installed alongside Cortex using their respective helm charts https://github.com/bitnami/charts/tree/master/bitnami/etcd and https://github.com/helm/charts/tree/master/stable/consul. + +### Storage + +Cortex requires a storage backend to store metrics and indexes. +See [cortex documentation](https://cortexmetrics.io/docs/) for details on storage types and documentation + +## Installation + +[Helm](https://helm.sh) must be installed to use the charts. +Please refer to Helm's [documentation](https://helm.sh/docs/) to get started. + +Once Helm is set up properly, add the repo as follows: + +```bash + helm repo add cortex-helm https://cortexproject.github.io/cortex-helm-chart +``` + +Cortex can now be installed with the following command: + +```bash + helm install cortex --namespace cortex cortex-helm/cortex +``` + +If you have custom options or values you want to override: + +```bash + helm install cortex --namespace cortex -f my-cortex-values.yaml cortex-helm/cortex +``` + +Specific versions of the chart can be installed using the `--version` option, with the default being the latest release. +What versions are available for installation can be listed with the following command: + +```bash + helm search repo cortex-helm +``` + +As part of this chart many different pods and services are installed which all +have varying resource requirements. Please make sure that you have sufficient +resources (CPU/memory) available in your cluster before installing Cortex Helm +chart. 
+ +## Upgrades + +To upgrade Cortex use the following command: + +```bash + helm upgrade cortex -f my-cortex-values.yaml cortex-helm/cortex +``` +Note that it might be necessary to use `--reset-values` since some default values in the values.yaml might have changed or were removed. + +Source code can be found [here](https://cortexmetrics.io/) + +## Requirements + +Kubernetes: `^1.19.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | memcached(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-read(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-write(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-frontend(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-index(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-metadata(memcached) | 5.15.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| alertmanager.​affinity | object | `{}` | | +| alertmanager.​annotations | object | `{}` | | +| alertmanager.​containerSecurityContext.​enabled | bool | `true` | | +| alertmanager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| alertmanager.​enabled | bool | `true` | | +| alertmanager.​env | list | `[]` | Extra env variables to pass to the cortex container | +| alertmanager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log level (debug, info, warn, error) | +| alertmanager.​extraContainers | list | `[]` | Additional containers to be added to the cortex pod. | +| alertmanager.​extraPorts | list | `[]` | Additional ports to the cortex services. Useful to expose extra container ports. 
| +| alertmanager.​extraVolumeMounts | list | `[]` | Extra volume mounts that will be added to the cortex container | +| alertmanager.​extraVolumes | list | `[]` | Additional volumes to the cortex pod. | +| alertmanager.​initContainers | list | `[]` | Init containers to be added to the cortex pod. | +| alertmanager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​nodeSelector | object | `{}` | | +| alertmanager.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Alertmanager data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| alertmanager.​persistentVolume.​annotations | object | `{}` | Alertmanager data Persistent Volume Claim annotations | +| alertmanager.​persistentVolume.​enabled | bool | `true` | If true and alertmanager.statefulSet.enabled is true, Alertmanager will create/use a Persistent Volume Claim If false, use emptyDir | +| alertmanager.​persistentVolume.​size | string | `"2Gi"` | Alertmanager data Persistent Volume size | +| alertmanager.​persistentVolume.​storageClass | string | `nil` | Alertmanager data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| alertmanager.​persistentVolume.​subPath | string | `""` | Subdirectory of Alertmanager data Persistent Volume to mount Useful if the volume's root directory is not empty | +| alertmanager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| alertmanager.​podDisruptionBudget | object | `{"maxUnavailable":1}` | If not set then a PodDisruptionBudget will not be created | +| alertmanager.​podLabels | object | `{}` | Pod Labels | +| alertmanager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​replicas | int | `1` | | +| alertmanager.​resources | object | `{}` | | +| alertmanager.​securityContext | object | `{}` | | +| alertmanager.​service.​annotations | object | `{}` | | +| alertmanager.​service.​labels | object | `{}` | | +| alertmanager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| alertmanager.​serviceMonitor.​additionalLabels | object | `{}` | | +| alertmanager.​serviceMonitor.​enabled | bool | `false` | | +| alertmanager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| alertmanager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| alertmanager.​serviceMonitor.​relabelings | list | `[]` | | +| alertmanager.​sidecar | object | `{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/data","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_alertmanager","labelValue":null,"resources":{},"searchNamespace":null,"skipTlsVerify":false,"watchMethod":null}` | Sidecars 
that collect the configmaps with specified label and stores the included files them into the respective folders | +| alertmanager.​sidecar.​skipTlsVerify | bool | `false` | skipTlsVerify Set to true to skip tls verification for kube api calls | +| alertmanager.​startupProbe.​failureThreshold | int | `10` | | +| alertmanager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. This is useful for using a persistent volume for storing silences between restarts. | +| alertmanager.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| alertmanager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| alertmanager.​strategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​terminationGracePeriodSeconds | int | `60` | | +| alertmanager.​tolerations | list | `[]` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| clusterDomain | string | `"cluster.local"` | Kubernetes cluster DNS domain | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"compactor"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| 
compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| compactor.​annotations | object | `{}` | | +| compactor.​containerSecurityContext.​enabled | bool | `true` | | +| compactor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| compactor.​enabled | bool | `true` | | +| compactor.​env | list | `[]` | | +| compactor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| compactor.​extraContainers | list | `[]` | | +| compactor.​extraPorts | list | `[]` | | +| compactor.​extraVolumeMounts | list | `[]` | | +| compactor.​extraVolumes | list | `[]` | | +| compactor.​initContainers | list | `[]` | | +| compactor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​nodeSelector | object | `{}` | | +| compactor.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | compactor data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| compactor.​persistentVolume.​annotations | object | `{}` | compactor data Persistent Volume Claim annotations | +| compactor.​persistentVolume.​enabled | bool | `true` | If true compactor will create/use a Persistent Volume Claim If false, use emptyDir | +| compactor.​persistentVolume.​size | string | `"2Gi"` | | +| compactor.​persistentVolume.​storageClass | string | `nil` | compactor data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| compactor.​persistentVolume.​subPath | string | `""` | Subdirectory of compactor data Persistent Volume to mount Useful if the volume's root directory is not empty | +| compactor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| compactor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| compactor.​podLabels | object | `{}` | Pod Labels | +| compactor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​replicas | int | `1` | | +| compactor.​resources | object | `{}` | | +| compactor.​securityContext | object | `{}` | | +| compactor.​service.​annotations | object | `{}` | | +| compactor.​service.​labels | object | `{}` | | +| compactor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| compactor.​serviceMonitor.​additionalLabels | object | `{}` | | +| compactor.​serviceMonitor.​enabled | bool | `false` | | +| compactor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| compactor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| compactor.​serviceMonitor.​relabelings | list | `[]` | | +| compactor.​startupProbe.​failureThreshold | int | `60` | | +| compactor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​startupProbe.​initialDelaySeconds | int | `120` | | +| compactor.​startupProbe.​periodSeconds | int | `30` | | +| compactor.​strategy.​type | string | `"RollingUpdate"` | | +| compactor.​terminationGracePeriodSeconds | int | `240` | | +| compactor.​tolerations | list | `[]` | | +| 
config.​alertmanager.​enable_api | bool | `false` | Enable the experimental alertmanager config api. | +| config.​alertmanager.​external_url | string | `"/api/prom/alertmanager"` | | +| config.​alertmanager.​storage | object | `{}` | Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config | +| config.​api.​prometheus_http_prefix | string | `"/prometheus"` | | +| config.​api.​response_compression_enabled | bool | `true` | Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression. | +| config.​auth_enabled | bool | `false` | | +| config.​blocks_storage.​bucket_store.​bucket_index.​enabled | bool | `true` | | +| config.​blocks_storage.​bucket_store.​sync_dir | string | `"/data/tsdb-sync"` | | +| config.​blocks_storage.​tsdb.​dir | string | `"/data/tsdb"` | | +| config.​distributor.​pool.​health_check_ingesters | bool | `true` | | +| config.​distributor.​shard_by_all_labels | bool | `true` | Distribute samples based on all labels, as opposed to solely by user and metric name. | +| config.​frontend.​log_queries_longer_than | string | `"10s"` | | +| config.​ingester.​lifecycler.​final_sleep | string | `"30s"` | Duration to sleep for before exiting, to ensure metrics are scraped. | +| config.​ingester.​lifecycler.​join_after | string | `"10s"` | We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. It can take a while to have the full picture when using gossip | +| config.​ingester.​lifecycler.​num_tokens | int | `512` | | +| config.​ingester.​lifecycler.​observe_period | string | `"10s"` | To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, after putting their own tokens into it. 
This is only useful when using gossip, since multiple ingesters joining at the same time can have conflicting tokens if they don't see each other yet. | +| config.​ingester.​lifecycler.​ring.​kvstore.​store | string | `"memberlist"` | | +| config.​ingester.​lifecycler.​ring.​replication_factor | int | `3` | Ingester replication factor per default is 3 | +| config.​ingester_client.​grpc_client_config.​max_recv_msg_size | int | `10485760` | | +| config.​ingester_client.​grpc_client_config.​max_send_msg_size | int | `10485760` | | +| config.​limits.​enforce_metric_name | bool | `true` | Enforce that every sample has a metric name | +| config.​limits.​max_query_lookback | string | `"0s"` | | +| config.​limits.​reject_old_samples | bool | `true` | | +| config.​limits.​reject_old_samples_max_age | string | `"168h"` | | +| config.​memberlist.​bind_port | int | `7946` | | +| config.​memberlist.​join_members | list | `["{{ include \"cortex.fullname\" $ }}-memberlist"]` | the service name of the memberlist if using memberlist discovery | +| config.​querier.​active_query_tracker_dir | string | `"/data/active-query-tracker"` | | +| config.​querier.​query_ingesters_within | string | `"13h"` | Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. Ingesters by default have no data older than 12 hours, so we can safely set this 13 hours | +| config.​querier.​query_store_after | string | `"12h"` | The time after which a metric should be queried from storage and not just ingesters. | +| config.​querier.​store_gateway_addresses | string | automatic | Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should is set automatically when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring). 
| +| config.​query_range.​align_queries_with_step | bool | `true` | | +| config.​query_range.​cache_results | bool | `true` | | +| config.​query_range.​results_cache.​cache.​memcached.​expiration | string | `"1h"` | | +| config.​query_range.​results_cache.​cache.​memcached_client.​timeout | string | `"1s"` | | +| config.​query_range.​split_queries_by_interval | string | `"24h"` | | +| config.​ruler.​enable_alertmanager_discovery | bool | `false` | | +| config.​ruler.​enable_api | bool | `true` | Enable the experimental ruler config api. | +| config.​ruler.​storage | object | `{}` | Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config | +| config.​runtime_config.​file | string | `"/etc/cortex-runtime-config/runtime_config.yaml"` | | +| config.​server.​grpc_listen_port | int | `9095` | | +| config.​server.​grpc_server_max_concurrent_streams | int | `10000` | | +| config.​server.​grpc_server_max_recv_msg_size | int | `10485760` | | +| config.​server.​grpc_server_max_send_msg_size | int | `10485760` | | +| config.​server.​http_listen_port | int | `8080` | | +| config.​storage | object | `{"engine":"blocks","index_queries_cache_config":{"memcached":{"expiration":"1h"},"memcached_client":{"timeout":"1s"}}}` | See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config | +| config.​storage.​index_queries_cache_config.​memcached.​expiration | string | `"1h"` | How long keys stay in the memcache | +| config.​storage.​index_queries_cache_config.​memcached_client.​timeout | string | `"1s"` | Maximum time to wait before giving up on memcached requests. 
| +| config.​store_gateway | object | `{"sharding_enabled":false}` | https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config | +| configs.​affinity | object | `{}` | | +| configs.​annotations | object | `{}` | | +| configs.​containerSecurityContext.​enabled | bool | `true` | | +| configs.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| configs.​enabled | bool | `false` | | +| configs.​env | list | `[]` | | +| configs.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| configs.​extraContainers | list | `[]` | | +| configs.​extraPorts | list | `[]` | | +| configs.​extraVolumeMounts | list | `[]` | | +| configs.​extraVolumes | list | `[]` | | +| configs.​initContainers | list | `[]` | | +| configs.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​nodeSelector | object | `{}` | | +| configs.​persistentVolume.​subPath | string | `nil` | | +| configs.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| configs.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| configs.​podLabels | object | `{}` | Pod Labels | +| configs.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​replicas | int | `1` | | +| configs.​resources | object | `{}` | | +| configs.​securityContext | object | `{}` | | +| configs.​service.​annotations | object | `{}` | | +| configs.​service.​labels | object | `{}` | | +| configs.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| configs.​serviceMonitor.​additionalLabels | object | `{}` | | +| configs.​serviceMonitor.​enabled | bool | `false` | | +| configs.​serviceMonitor.​extraEndpointSpec | object | `{}` | 
Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| configs.​serviceMonitor.​metricRelabelings | list | `[]` | | +| configs.​serviceMonitor.​relabelings | list | `[]` | | +| configs.​startupProbe.​failureThreshold | int | `10` | | +| configs.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| configs.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| configs.​strategy.​type | string | `"RollingUpdate"` | | +| configs.​terminationGracePeriodSeconds | int | `180` | | +| configs.​tolerations | list | `[]` | | +| configsdb_postgresql.​auth.​existing_secret.​key | string | `nil` | | +| configsdb_postgresql.​auth.​existing_secret.​name | string | `nil` | | +| configsdb_postgresql.​auth.​password | string | `nil` | | +| configsdb_postgresql.​enabled | bool | `false` | | +| configsdb_postgresql.​uri | string | `nil` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"distributor"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| distributor.​annotations | object | `{}` | | +| distributor.​autoscaling.​behavior 
| object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| distributor.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the distributor pods. | +| distributor.​autoscaling.​maxReplicas | int | `30` | | +| distributor.​autoscaling.​minReplicas | int | `2` | | +| distributor.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| distributor.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| distributor.​containerSecurityContext.​enabled | bool | `true` | | +| distributor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| distributor.​env | list | `[]` | | +| distributor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| distributor.​extraContainers | list | `[]` | | +| distributor.​extraPorts | list | `[]` | | +| distributor.​extraVolumeMounts | list | `[]` | | +| distributor.​extraVolumes | list | `[]` | | +| distributor.​initContainers | list | `[]` | | +| distributor.​lifecycle | object | `{}` | | +| distributor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​nodeSelector | object | `{}` | | +| distributor.​persistentVolume.​subPath | string | `nil` | | +| distributor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| distributor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| distributor.​podLabels | object | `{}` | Pod Labels | +| distributor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​replicas | int | `2` | | +| distributor.​resources | object | `{}` | | +| distributor.​securityContext | object | `{}` | | +| distributor.​service.​annotations | object | `{}` | | +| 
distributor.​service.​labels | object | `{}` | | +| distributor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| distributor.​serviceMonitor.​additionalLabels | object | `{}` | | +| distributor.​serviceMonitor.​enabled | bool | `false` | | +| distributor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| distributor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| distributor.​serviceMonitor.​relabelings | list | `[]` | | +| distributor.​startupProbe.​failureThreshold | int | `10` | | +| distributor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| distributor.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| distributor.​strategy.​type | string | `"RollingUpdate"` | | +| distributor.​terminationGracePeriodSeconds | int | `60` | | +| distributor.​tolerations | list | `[]` | | +| externalConfigSecretName | string | `"secret-with-config.yaml"` | | +| externalConfigVersion | string | `"0"` | | +| image.​pullPolicy | string | `"IfNotPresent"` | | +| image.​pullSecrets | list | `[]` | Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| image.​repository | string | `"quay.io/cortexproject/cortex"` | | +| image.​tag | string | `""` | Allows you to override the cortex version in this chart. Use at your own risk. 
| +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"ingester"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| ingester.​annotations | object | `{}` | | +| ingester.​autoscaling.​behavior.​scaleDown.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details | +| ingester.​autoscaling.​behavior.​scaleDown.​stabilizationWindowSeconds | int | `3600` | uses metrics from the past 1h to make scaleDown decisions | +| ingester.​autoscaling.​behavior.​scaleUp.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | This default scaleup policy allows adding 1 pod every 30 minutes. 
Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| ingester.​autoscaling.​enabled | bool | `false` | | +| ingester.​autoscaling.​maxReplicas | int | `30` | | +| ingester.​autoscaling.​minReplicas | int | `3` | | +| ingester.​autoscaling.​targetMemoryUtilizationPercentage | int | `80` | | +| ingester.​containerSecurityContext.​enabled | bool | `true` | | +| ingester.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ingester.​env | list | `[]` | | +| ingester.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| ingester.​extraContainers | list | `[]` | | +| ingester.​extraPorts | list | `[]` | | +| ingester.​extraVolumeMounts | list | `[]` | | +| ingester.​extraVolumes | list | `[]` | | +| ingester.​initContainers | list | `[]` | | +| ingester.​lifecycle.​preStop | object | `{"httpGet":{"path":"/ingester/shutdown","port":"http-metrics"}}` | The /shutdown preStop hook is recommended as part of the ingester scaledown process, but can be removed to optimize rolling restarts in instances that will never be scaled down or when using chunks storage with WAL disabled. https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down | +| ingester.​livenessProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. 
Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​nodeSelector | object | `{}` | | +| ingester.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Ingester data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| ingester.​persistentVolume.​annotations | object | `{}` | Ingester data Persistent Volume Claim annotations | +| ingester.​persistentVolume.​enabled | bool | `true` | If true and ingester.statefulSet.enabled is true, Ingester will create/use a Persistent Volume Claim If false, use emptyDir | +| ingester.​persistentVolume.​size | string | `"2Gi"` | Ingester data Persistent Volume size | +| ingester.​persistentVolume.​storageClass | string | `nil` | Ingester data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| ingester.​persistentVolume.​subPath | string | `""` | Subdirectory of Ingester data Persistent Volume to mount Useful if the volume's root directory is not empty | +| ingester.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ingester.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ingester.​podLabels | object | `{}` | Pod Labels | +| ingester.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ingester.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ingester.​replicas | int | `3` | | +| ingester.​resources | object | `{}` | | +| ingester.​securityContext | object | `{}` | | +| ingester.​service.​annotations | object | `{}` | | +| ingester.​service.​labels | object | `{}` | | +| ingester.​serviceAccount.​name | string | `nil` | | +| ingester.​serviceMonitor.​additionalLabels | object | `{}` | | +| ingester.​serviceMonitor.​enabled | bool | `false` | | +| ingester.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ingester.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ingester.​serviceMonitor.​relabelings | list | `[]` | | +| ingester.​startupProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. 
This is useful when using WAL | +| ingester.​statefulSet.​podManagementPolicy | string | `"OrderedReady"` | ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details | +| ingester.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| ingester.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ingester.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ingester.​strategy.​type | string | `"RollingUpdate"` | | +| ingester.​terminationGracePeriodSeconds | int | `240` | | +| ingester.​tolerations | list | `[]` | | +| ingress.​annotations | object | `{}` | | +| ingress.​enabled | bool | `false` | | +| ingress.​hosts[0].​host | string | `"chart-example.local"` | | +| ingress.​hosts[0].​paths[0] | string | `"/"` | | +| ingress.​ingressClass.​enabled | bool | `false` | | +| ingress.​ingressClass.​name | string | `"nginx"` | | +| ingress.​tls | list | `[]` | | +| memcached | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | chunk caching for legacy chunk storage engine | +| memcached-blocks-index.​architecture | string | `"high-availability"` | | +| memcached-blocks-index.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-index.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-index.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is 
the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached-blocks-index.​metrics.​enabled | bool | `true` | | +| memcached-blocks-index.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-index.​replicaCount | int | `2` | | +| memcached-blocks-index.​resources | object | `{}` | | +| memcached-blocks-metadata.​architecture | string | `"high-availability"` | | +| memcached-blocks-metadata.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-metadata.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-metadata.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks-metadata.​metrics.​enabled | bool | `true` | | +| memcached-blocks-metadata.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-metadata.​replicaCount | int | `2` | | +| memcached-blocks-metadata.​resources | object | `{}` | | +| memcached-blocks.​architecture | string | `"high-availability"` | | +| memcached-blocks.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks.​metrics.​enabled | bool | `true` | | +| memcached-blocks.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks.​replicaCount | int | `2` | | +| memcached-blocks.​resources | object | `{}` | | +| memcached-frontend.​architecture | string | `"high-availability"` | | +| memcached-frontend.​enabled | bool | `false` | | +| memcached-frontend.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-frontend.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-frontend.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-frontend.​metrics.​enabled | bool | `true` | | +| memcached-frontend.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-frontend.​replicaCount | int | `2` | | +| memcached-frontend.​resources | object | `{}` | | +| memcached-index-read | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index read caching for legacy chunk storage engine | +| memcached-index-read.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-read.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-read.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-index-write | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index write caching for legacy chunk storage engine | +| memcached-index-write.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-write.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-write.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| nginx.​affinity | object | `{}` | | +| nginx.​annotations | object | `{}` | | +| nginx.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| nginx.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the nginx pods. | +| nginx.​autoscaling.​maxReplicas | int | `30` | | +| nginx.​autoscaling.​minReplicas | int | `2` | | +| nginx.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| nginx.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| nginx.​config.​auth_orgs | list | `[]` | (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config | +| nginx.​config.​basicAuthSecretName | string | `""` | (optional) Name of basic auth secret. In order to use this option, a secret with htpasswd formatted contents at the key ".htpasswd" must exist. For example: apiVersion: v1 kind: Secret metadata: name: my-secret namespace: stringData: .htpasswd: | user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ Please note that the use of basic auth will not identify organizations the way X-Scope-OrgID does. Thus, the use of basic auth alone will not prevent one tenant from viewing the metrics of another. To ensure tenants are scoped appropriately, explicitly set the `X-Scope-OrgID` header in the nginx config. 
Example setHeaders: X-Scope-OrgID: $remote_user | +| nginx.​config.​client_max_body_size | string | `"1M"` | ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size | +| nginx.​config.​dnsResolver | string | `"coredns.kube-system.svc.cluster.local"` | | +| nginx.​config.​httpSnippet | string | `""` | arbitrary snippet to inject in the http { } section of the nginx config | +| nginx.​config.​mainSnippet | string | `""` | arbitrary snippet to inject in the top section of the nginx config | +| nginx.​config.​serverSnippet | string | `""` | arbitrary snippet to inject in the server { } section of the nginx config | +| nginx.​config.​setHeaders | object | `{}` | | +| nginx.​containerSecurityContext.​enabled | bool | `true` | | +| nginx.​containerSecurityContext.​readOnlyRootFilesystem | bool | `false` | | +| nginx.​enabled | bool | `true` | | +| nginx.​env | list | `[]` | | +| nginx.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| nginx.​extraContainers | list | `[]` | | +| nginx.​extraPorts | list | `[]` | | +| nginx.​extraVolumeMounts | list | `[]` | | +| nginx.​extraVolumes | list | `[]` | | +| nginx.​http_listen_port | int | `80` | | +| nginx.​image.​pullPolicy | string | `"IfNotPresent"` | | +| nginx.​image.​repository | string | `"nginx"` | | +| nginx.​image.​tag | float | `1.21` | | +| nginx.​initContainers | list | `[]` | | +| nginx.​livenessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​nodeSelector | object | `{}` | | +| nginx.​persistentVolume.​subPath | string | `nil` | | +| nginx.​podAnnotations | object | `{}` | Pod Annotations | +| nginx.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| nginx.​podLabels | object | `{}` | Pod Labels | +| nginx.​readinessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| 
nginx.​replicas | int | `2` | | +| nginx.​resources | object | `{}` | | +| nginx.​securityContext | object | `{}` | | +| nginx.​service.​annotations | object | `{}` | | +| nginx.​service.​labels | object | `{}` | | +| nginx.​service.​type | string | `"ClusterIP"` | | +| nginx.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| nginx.​startupProbe.​failureThreshold | int | `10` | | +| nginx.​startupProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| nginx.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| nginx.​strategy.​type | string | `"RollingUpdate"` | | +| nginx.​terminationGracePeriodSeconds | int | `10` | | +| nginx.​tolerations | list | `[]` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"querier"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| querier.​annotations | object | `{}` | | +| querier.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| querier.​autoscaling.​enabled | bool | `false` | Creates 
a HorizontalPodAutoscaler for the querier pods. | +| querier.​autoscaling.​maxReplicas | int | `30` | | +| querier.​autoscaling.​minReplicas | int | `2` | | +| querier.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| querier.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| querier.​containerSecurityContext.​enabled | bool | `true` | | +| querier.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| querier.​env | list | `[]` | | +| querier.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| querier.​extraContainers | list | `[]` | | +| querier.​extraPorts | list | `[]` | | +| querier.​extraVolumeMounts | list | `[]` | | +| querier.​extraVolumes | list | `[]` | | +| querier.​initContainers | list | `[]` | | +| querier.​lifecycle | object | `{}` | | +| querier.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​nodeSelector | object | `{}` | | +| querier.​persistentVolume.​subPath | string | `nil` | | +| querier.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| querier.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| querier.​podLabels | object | `{}` | Pod Labels | +| querier.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​replicas | int | `2` | | +| querier.​resources | object | `{}` | | +| querier.​securityContext | object | `{}` | | +| querier.​service.​annotations | object | `{}` | | +| querier.​service.​labels | object | `{}` | | +| querier.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| querier.​serviceMonitor.​additionalLabels | object | `{}` | | +| querier.​serviceMonitor.​enabled | bool | `false` | | +| 
querier.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| querier.​serviceMonitor.​metricRelabelings | list | `[]` | | +| querier.​serviceMonitor.​relabelings | list | `[]` | | +| querier.​startupProbe.​failureThreshold | int | `10` | | +| querier.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| querier.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| querier.​strategy.​type | string | `"RollingUpdate"` | | +| querier.​terminationGracePeriodSeconds | int | `180` | | +| querier.​tolerations | list | `[]` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"query-frontend"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| query_frontend.​annotations | object | `{}` | | +| query_frontend.​containerSecurityContext.​enabled | bool | `true` | | +| query_frontend.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| query_frontend.​env | list | `[]` | | +| query_frontend.​extraArgs | object | `{}` | Additional Cortex 
container arguments, e.g. log.level (debug, info, warn, error) | +| query_frontend.​extraContainers | list | `[]` | | +| query_frontend.​extraPorts | list | `[]` | | +| query_frontend.​extraVolumeMounts | list | `[]` | | +| query_frontend.​extraVolumes | list | `[]` | | +| query_frontend.​initContainers | list | `[]` | | +| query_frontend.​lifecycle | object | `{}` | | +| query_frontend.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​nodeSelector | object | `{}` | | +| query_frontend.​persistentVolume.​subPath | string | `nil` | | +| query_frontend.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| query_frontend.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| query_frontend.​podLabels | object | `{}` | Pod Labels | +| query_frontend.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​replicas | int | `2` | | +| query_frontend.​resources | object | `{}` | | +| query_frontend.​securityContext | object | `{}` | | +| query_frontend.​service.​annotations | object | `{}` | | +| query_frontend.​service.​labels | object | `{}` | | +| query_frontend.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| query_frontend.​serviceMonitor.​additionalLabels | object | `{}` | | +| query_frontend.​serviceMonitor.​enabled | bool | `false` | | +| query_frontend.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| query_frontend.​serviceMonitor.​metricRelabelings | list | `[]` | | +| query_frontend.​serviceMonitor.​relabelings | list | `[]` | | +| 
query_frontend.​startupProbe.​failureThreshold | int | `10` | | +| query_frontend.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| query_frontend.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| query_frontend.​strategy.​type | string | `"RollingUpdate"` | | +| query_frontend.​terminationGracePeriodSeconds | int | `180` | | +| query_frontend.​tolerations | list | `[]` | | +| ruler.​affinity | object | `{}` | | +| ruler.​annotations | object | `{}` | | +| ruler.​containerSecurityContext.​enabled | bool | `true` | | +| ruler.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ruler.​directories | object | `{}` | allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html | +| ruler.​enabled | bool | `true` | | +| ruler.​env | list | `[]` | | +| ruler.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) | +| ruler.​extraContainers | list | `[]` | | +| ruler.​extraPorts | list | `[]` | | +| ruler.​extraVolumeMounts | list | `[]` | | +| ruler.​extraVolumes | list | `[]` | | +| ruler.​initContainers | list | `[]` | | +| ruler.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​nodeSelector | object | `{}` | | +| ruler.​persistentVolume.​subPath | string | `nil` | | +| ruler.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ruler.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ruler.​podLabels | object | `{}` | Pod Labels | +| ruler.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​replicas | int | `1` | | +| ruler.​resources | object | `{}` | | +| ruler.​securityContext | object | `{}` | | +| ruler.​service.​annotations | object | `{}` | | +| ruler.​service.​labels | object | `{}` | | +| ruler.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| ruler.​serviceMonitor.​additionalLabels | object | `{}` | | +| ruler.​serviceMonitor.​enabled | bool | `false` | | +| ruler.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ruler.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ruler.​serviceMonitor.​relabelings | list | `[]` | | +| ruler.​sidecar | object | 
`{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/tmp/rules","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_rules","labelValue":null,"resources":{},"searchNamespace":null,"watchMethod":null}` | Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders | +| ruler.​sidecar.​defaultFolderName | string | `nil` | The default folder name, it will create a subfolder under the `folder` and put rules in there instead | +| ruler.​sidecar.​folder | string | `"/tmp/rules"` | folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) | +| ruler.​sidecar.​folderAnnotation | string | `nil` | If specified, the sidecar will look for annotation with this name to create folder and put graph here. You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. | +| ruler.​sidecar.​label | string | `"cortex_rules"` | label that the configmaps with rules are marked with | +| ruler.​sidecar.​labelValue | string | `nil` | value of label that the configmaps with rules are set to | +| ruler.​sidecar.​searchNamespace | string | `nil` | If specified, the sidecar will search for rules config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | +| ruler.​startupProbe.​failureThreshold | int | `10` | | +| ruler.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ruler.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ruler.​strategy.​type | string | `"RollingUpdate"` | | +| ruler.​terminationGracePeriodSeconds | int | `180` | | +| ruler.​tolerations | list | `[]` | | +| runtimeconfigmap.​annotations | object | `{}` | | +| runtimeconfigmap.​create | bool | `true` | If true, a configmap for the `runtime_config` will be created. If false, the configmap _must_ exist already on the cluster or pods will fail to create. | +| runtimeconfigmap.​runtime_config | object | `{}` | https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file | +| serviceAccount.​annotations | object | `{}` | | +| serviceAccount.​automountServiceAccountToken | bool | `true` | | +| serviceAccount.​create | bool | `true` | | +| serviceAccount.​name | string | `nil` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"store-gateway"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` 
| | +| store_gateway.​annotations | object | `{}` | | +| store_gateway.​containerSecurityContext.​enabled | bool | `true` | | +| store_gateway.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| store_gateway.​env | list | `[]` | | +| store_gateway.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| store_gateway.​extraContainers | list | `[]` | | +| store_gateway.​extraPorts | list | `[]` | | +| store_gateway.​extraVolumeMounts | list | `[]` | | +| store_gateway.​extraVolumes | list | `[]` | | +| store_gateway.​initContainers | list | `[]` | | +| store_gateway.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​nodeSelector | object | `{}` | | +| store_gateway.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Store-gateway data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| store_gateway.​persistentVolume.​annotations | object | `{}` | Store-gateway data Persistent Volume Claim annotations | +| store_gateway.​persistentVolume.​enabled | bool | `true` | If true Store-gateway will create/use a Persistent Volume Claim If false, use emptyDir | +| store_gateway.​persistentVolume.​size | string | `"2Gi"` | Store-gateway data Persistent Volume size | +| store_gateway.​persistentVolume.​storageClass | string | `nil` | Store-gateway data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| store_gateway.​persistentVolume.​subPath | string | `""` | Subdirectory of Store-gateway data Persistent Volume to mount Useful if the volume's root directory is not empty | +| store_gateway.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| store_gateway.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| store_gateway.​podLabels | object | `{}` | Pod Labels | +| store_gateway.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​replicas | int | `1` | | +| store_gateway.​resources | object | `{}` | | +| store_gateway.​securityContext | object | `{}` | | +| store_gateway.​service.​annotations | object | `{}` | | +| store_gateway.​service.​labels | object | `{}` | | +| store_gateway.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| store_gateway.​serviceMonitor.​additionalLabels | object | `{}` | | +| store_gateway.​serviceMonitor.​enabled | bool | `false` | | +| store_gateway.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| store_gateway.​serviceMonitor.​metricRelabelings | list | `[]` | | +| store_gateway.​serviceMonitor.​relabelings | list | `[]` | | +| store_gateway.​startupProbe.​failureThreshold | int | `60` | | +| store_gateway.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​startupProbe.​initialDelaySeconds | int | `120` | | +| store_gateway.​startupProbe.​periodSeconds | int | `30` | | +| store_gateway.​strategy.​type | string | `"RollingUpdate"` | | +| 
store_gateway.​terminationGracePeriodSeconds | int | `240` | | +| store_gateway.​tolerations | list | `[]` | | +| table_manager.​affinity | object | `{}` | | +| table_manager.​annotations | object | `{}` | | +| table_manager.​containerSecurityContext.​enabled | bool | `true` | | +| table_manager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| table_manager.​env | list | `[]` | | +| table_manager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| table_manager.​extraContainers | list | `[]` | | +| table_manager.​extraPorts | list | `[]` | | +| table_manager.​extraVolumeMounts | list | `[]` | | +| table_manager.​extraVolumes | list | `[]` | | +| table_manager.​initContainers | list | `[]` | | +| table_manager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​nodeSelector | object | `{}` | | +| table_manager.​persistentVolume.​subPath | string | `nil` | | +| table_manager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| table_manager.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| table_manager.​podLabels | object | `{}` | Pod Labels | +| table_manager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​replicas | int | `1` | | +| table_manager.​resources | object | `{}` | | +| table_manager.​securityContext | object | `{}` | | +| table_manager.​service.​annotations | object | `{}` | | +| table_manager.​service.​labels | object | `{}` | | +| table_manager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| table_manager.​serviceMonitor.​additionalLabels | object | `{}` | | +| table_manager.​serviceMonitor.​enabled | bool 
| `false` | | +| table_manager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| table_manager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| table_manager.​serviceMonitor.​relabelings | list | `[]` | | +| table_manager.​startupProbe.​failureThreshold | int | `10` | | +| table_manager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| table_manager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| table_manager.​strategy.​type | string | `"RollingUpdate"` | | +| table_manager.​terminationGracePeriodSeconds | int | `180` | | +| table_manager.​tolerations | list | `[]` | | +| tags.​blocks-storage-memcached | bool | `false` | Set to true to enable block storage memcached caching | +| useConfigMap | bool | `false` | | +| useExternalConfig | bool | `false` | | + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt new file mode 100644 index 0000000..1bd3203 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{- if eq .Values.config.storage.engine "chunks" }} +Cortex chunks storage has been deprecated, and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. +No new features will be added to the chunks storage. +Unlike the official cortex default configuration this helm-chart does not run the chunk engine by default. +{{- end }} + +Verify the application is working by running these commands: + kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "cortex.querierFullname" . 
}} {{ .Values.config.server.http_listen_port }} + curl http://127.0.0.1:{{ .Values.config.server.http_listen_port }}/services diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl new file mode 100644 index 0000000..81914c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cortex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cortex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cortex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cortex.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cortex.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the app name of cortex clients. 
Defaults to the same logic as "cortex.fullname", and default client expects "prometheus". +*/}} +{{- define "client.name" -}} +{{- if .Values.client.name -}} +{{- .Values.client.name -}} +{{- else if .Values.client.fullnameOverride -}} +{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "prometheus" .Values.client.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "cortex.labels" -}} +helm.sh/chart: {{ include "cortex.chart" . }} +{{ include "cortex.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cortex.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cortex.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create configuration parameters for memcached configuration +*/}} +{{- define "cortex.memcached" -}} +{{- if and (eq .Values.config.storage.engine "blocks") (index .Values "tags" "blocks-storage-memcached") }} +- "-blocks-storage.bucket-store.index-cache.backend=memcached" +- "-blocks-storage.bucket-store.index-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-index.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.chunks-cache.backend=memcached" +- "-blocks-storage.bucket-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.metadata-cache.backend=memcached" +- "-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-metadata.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") .Values.memcached.enabled }} +- "-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-read" "enabled") }} +- "-store.index-cache-read.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-read.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-write" "enabled") }} +- "-store.index-cache-write.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-write.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Create configuration for frontend memcached configuration +*/}} +{{- define "cortex.frontend-memcached" -}} +{{- if index .Values "memcached-frontend" 
"enabled" }} +- "-frontend.memcached.addresses=dns+{{ template "cortex.fullname" . }}-memcached-frontend.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Determine the policy api version +*/}} +{{- define "cortex.pdbVersion" -}} +{{- if or (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") (semverCompare ">=1.21" .Capabilities.KubeVersion.Version) -}} +policy/v1 +{{- else -}} +policy/v1beta1 +{{- end -}} +{{- end -}} + +{{/* +Get checksum of config secret or configMap +*/}} +{{- define "cortex.configChecksum" -}} +{{- if .Values.useExternalConfig -}} +{{- .Values.externalConfigVersion -}} +{{- else if .Values.useConfigMap -}} +{{- include (print $.Template.BasePath "/configmap.yaml") . | sha256sum -}} +{{- else -}} +{{- include (print $.Template.BasePath "/secret.yaml") . | sha256sum -}} +{{- end -}} +{{- end -}} + +{{/* +Get volume of config secret of configMap +*/}} +{{- define "cortex.configVolume" -}} +- name: config + {{- if .Values.useExternalConfig }} + secret: + secretName: {{ .Values.externalConfigSecretName }} + {{- else if .Values.useConfigMap }} + configMap: + name: {{ template "cortex.fullname" . }}-config + {{- else }} + secret: + secretName: {{ template "cortex.fullname" . 
}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml new file mode 100644 index 0000000..49c4ca7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alertmanager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + name: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - name: alertmanager +# image: quay.io/cortexproject/cortex:v1.9.0 +# image: registry.cloud.intermax:5000/library/cortex:v1.11.0 + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cortex:v1.11.0 + imagePullPolicy: IfNotPresent + args: + - -target=alertmanager +# - -log.level=debug + - -server.http-listen-port=80 + - -alertmanager.configs.url=http://{{ template "cortex.fullname" . 
}}-configs:8080 + - -alertmanager.web.external-url=/alertmanager + ports: + - containerPort: 80 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 0000000..989feb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: alertmanager +spec: + ports: + - port: 80 + selector: + name: alertmanager diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml new file mode 100644 index 0000000..cf7f25a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cortex.fullname" . }}-clusterrole + labels: + {{- include "cortex.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..c1d9884 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cortex.fullname" . }}-clusterrolebinding + labels: + {{- include "cortex.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cortex.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 0000000..f89b33c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,23 @@ + +{{/* +compactor fullname +*/}} +{{- define "cortex.compactorFullname" -}} +{{ include "cortex.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "cortex.compactorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "cortex.compactorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: compactor +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml new file mode 100644 index 0000000..8634e4c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.compactor.replicas) 1) (.Values.compactor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.compactor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml new file mode 100644 index 0000000..a33e849 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.compactor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} + {{- if .Values.compactor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.compactor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.compactor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.compactor.serviceMonitor.interval }} + interval: {{ .Values.compactor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.compactor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.compactor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.compactor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.compactor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 0000000..c0a1baf --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,141 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.compactor.annotations | nindent 4 }} +spec: + replicas: {{ .Values.compactor.replicas }} + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.compactor.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-compactor + {{- if .Values.compactor.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.compactor.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.compactor.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.compactor.persistentVolume.storageClass }} + {{- if (eq "-" .Values.compactor.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.compactor.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.compactor.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.compactor.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.compactorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.compactor.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.compactor.priorityClassName }} + priorityClassName: {{ .Values.compactor.priorityClassName }} + {{- end }} + {{- if .Values.compactor.securityContext.enabled }} + securityContext: {{- omit .Values.compactor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.compactor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.compactor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.compactor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.compactor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.compactor.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.compactor.extraVolumes }} + {{- toYaml .Values.compactor.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.compactor.extraContainers }} + {{ toYaml .Values.compactor.extraContainers | nindent 8 }} + {{- end }} + - name: compactor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=compactor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.compactor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.compactor.extraVolumeMounts }} + {{- toYaml .Values.compactor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.compactor.persistentVolume.subPath }} + subPath: {{ .Values.compactor.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.compactor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.compactor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.compactor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.compactor.resources | nindent 12 }} + {{- if .Values.compactor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.compactor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.compactor.env }} + env: + {{- toYaml .Values.compactor.env | nindent 12 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml new file mode 100644 index 0000000..ae20f78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml @@ -0,0 +1,25 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.compactorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- with .Values.compactor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.compactor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.compactorSelectorLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml new file mode 100644 index 0000000..001b13a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (and (not .Values.useExternalConfig) (.Values.useConfigMap)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: | + {{- tpl (toYaml .Values.config) . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl new file mode 100644 index 0000000..c8945dc --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl @@ -0,0 +1,23 @@ + +{{/* +configs fullname +*/}} +{{- define "cortex.configsFullname" -}} +{{ include "cortex.fullname" . }}-configs +{{- end }} + +{{/* +configs common labels +*/}} +{{- define "cortex.configsLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: configs +{{- end }} + +{{/* +configs selector labels +*/}} +{{- define "cortex.configsSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: configs +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml new file mode 100644 index 0000000..86048ce --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml @@ -0,0 +1,124 @@ +{{- if .Values.configs.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.configs.annotations | nindent 4 }} +spec: + replicas: {{ .Values.configs.replicas }} + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.configs.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.configsLabels" . | nindent 8 }} + {{- with .Values.configs.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.configs.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.configs.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.configs.priorityClassName }} + priorityClassName: {{ .Values.configs.priorityClassName }} + {{- end }} + {{- if .Values.configs.securityContext.enabled }} + securityContext: {{- omit .Values.configs.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.configs.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: configs + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=configs" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configsdb_postgresql.enabled }} + - "-configs.database.uri={{ .Values.configsdb_postgresql.uri }}" + - "-configs.database.password-file=/etc/postgresql/password" + - "-configs.database.migrations-dir=/migrations" + {{- else }} + - "-configs.database.uri=memory://" + {{- end }} + {{- range $key, $value := .Values.configs.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/cortex + subPath: {{ .Values.configs.persistentVolume.subPath }} + - name: runtime-config + mountPath: /etc/cortex-runtime-config + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + mountPath: /etc/postgresql + {{- end }} + {{- if .Values.configs.extraVolumeMounts }} + {{- toYaml .Values.configs.extraVolumeMounts | nindent 12}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.configs.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.configs.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.configs.readinessProbe | nindent 12 }} + 
resources: + {{- toYaml .Values.configs.resources | nindent 12 }} + {{- if .Values.configs.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.configs.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.configs.env }} + env: + {{- toYaml .Values.configs.env | nindent 12 }} + {{- end }} + {{- if .Values.configs.extraContainers }} + {{- toYaml .Values.configs.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.configs.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.configs.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.configs.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + secret: + secretName: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.name }}{{ else }}{{ template "cortex.fullname" . }}-postgresql{{ end }} + items: + - key: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.key }}{{ else }}postgresql-password{{ end }} + path: password + {{- end }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.configs.extraVolumes }} + {{- toYaml .Values.configs.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml new file mode 100644 index 0000000..b6e46b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.configs.replicas) 1) (.Values.configs.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.configs.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml new file mode 100644 index 0000000..393bc32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.configs.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . 
| nindent 4 }} + {{- if .Values.configs.serviceMonitor.additionalLabels }} +{{ toYaml .Values.configs.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.configs.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.configs.serviceMonitor.interval }} + interval: {{ .Values.configs.serviceMonitor.interval }} + {{- end }} + {{- if .Values.configs.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.configs.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.configs.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.configs.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.configs.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.configs.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml new file mode 100644 index 0000000..6dbc2cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.configs.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + {{- with .Values.configs.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.configs.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.configsSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml new file mode 100644 index 0000000..472f83e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-0 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH1 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-1 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH2 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-2 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ 
.Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH3 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 0000000..24e8d00 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,23 @@ + +{{/* +distributor fullname +*/}} +{{- define "cortex.distributorFullname" -}} +{{ include "cortex.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "cortex.distributorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "cortex.distributorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml new file mode 100644 index 0000000..fc9c0ba --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.distributor.annotations | nindent 4 }} +spec: + {{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.distributor.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.distributorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.distributor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.distributor.priorityClassName }} + priorityClassName: {{ .Values.distributor.priorityClassName }} + {{- end }} + {{- if .Values.distributor.securityContext.enabled }} + securityContext: {{- omit .Values.distributor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.distributor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: distributor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=distributor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.distributor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.distributor.extraVolumeMounts }} + {{- toYaml .Values.distributor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.distributor.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.distributor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.distributor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.distributor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.distributor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.distributor.env }} + env: + {{- toYaml .Values.distributor.env | nindent 12 }} + {{- end }} + {{- with .Values.distributor.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.distributor.extraContainers }} + {{- toYaml .Values.distributor.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.distributor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.distributor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.distributor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.distributor.extraVolumes }} + {{- toYaml .Values.distributor.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml new file mode 100644 index 0000000..0c1c9f6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.distributor.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.distributorFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.distributorFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml new file mode 100644 index 0000000..7b05701 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.distributor.replicas) 1) (.Values.distributor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.distributor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml new file mode 100644 index 0000000..5db8389 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.distributor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.distributorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- if .Values.distributor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.distributor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.distributor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.distributor.serviceMonitor.interval }} + interval: {{ .Values.distributor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.distributor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.distributor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.distributor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.distributor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 0000000..1c4f7f6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . 
}}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml new file mode 100644 index 0000000..2db7197 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.distributorSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 0000000..4705327 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,23 @@ + +{{/* +ingester fullname +*/}} +{{- define "cortex.ingesterFullname" -}} +{{ include "cortex.fullname" . }}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "cortex.ingesterLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "cortex.ingesterSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ingester +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml new file mode 100644 index 0000000..b26d3a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml @@ -0,0 +1,130 @@ +{{- if not .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.ingester.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.ingester.env }} + {{ toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- with .Values.ingester.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml new file mode 100644 index 0000000..97c5290 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml @@ -0,0 +1,29 @@ +{{- with .Values.ingester.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.ingesterFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ if $.Values.ingester.statefulSet.enabled }}StatefulSet{{ else }}Deployment{{ end }} + name: {{ include "cortex.ingesterFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .targetMemoryUtilizationPercentage }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml new file mode 100644 index 0000000..a47ecb4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ingester.replicas) 1) (.Values.ingester.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ingester.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml new file mode 100644 index 0000000..310ca54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingester.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . 
| nindent 4 }} + {{- if .Values.ingester.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ingester.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ingester.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ingester.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 0000000..8016441 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,153 @@ +{{- if .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.ingester.statefulStrategy | nindent 4 }} + podManagementPolicy: "{{ .Values.ingester.statefulSet.podManagementPolicy }}" + serviceName: {{ template "cortex.fullname" . }}-ingester-headless + {{- if .Values.ingester.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.ingester.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.ingester.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.ingester.persistentVolume.storageClass }} + {{- if (eq "-" .Values.ingester.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.ingester.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.ingester.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.ingester.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.ingester.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8 }} + {{- end }} + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ingester.env }} + env: + {{- toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 0000000..b783caa --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml new file mode 100644 index 0000000..02183ae --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl new file mode 100644 index 0000000..61d8b78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl @@ -0,0 +1,23 @@ + +{{/* +nginx fullname +*/}} +{{- define "cortex.nginxFullname" -}} +{{ include "cortex.fullname" . }}-nginx +{{- end }} + +{{/* +nginx common labels +*/}} +{{- define "cortex.nginxLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: nginx +{{- end }} + +{{/* +nginx selector labels +*/}} +{{- define "cortex.nginxSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: nginx +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml new file mode 100644 index 0000000..fd3474d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml @@ -0,0 +1,140 @@ +{{- if .Values.nginx.enabled }} +{{- $rootDomain := printf "%s.svc.%s:%d" .Release.Namespace .Values.clusterDomain (.Values.config.server.http_listen_port | int) }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . 
| nindent 4 }} +data: + nginx.conf: |- + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + {{- with .Values.nginx.config.mainSnippet }} + {{ tpl . $ | nindent 4 }} + {{- end }} + + http { + default_type application/octet-stream; + client_max_body_size {{.Values.nginx.config.client_max_body_size}}; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" $http_x_scope_orgid'; + access_log /dev/stderr main; + sendfile on; + tcp_nopush on; + resolver {{ default (printf "coredns.kube-system.svc.%s" .Values.clusterDomain ) .Values.nginx.config.dnsResolver }}; + + {{- with .Values.nginx.config.httpSnippet }} + {{ tpl . $ | nindent 6 }} + {{- end }} + + server { # simple reverse-proxy + listen {{ .Values.nginx.http_listen_port }}; + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + proxy_http_version 1.1; + proxy_set_header X-Scope-OrgID 0; + + {{- range $key, $value := .Values.nginx.config.setHeaders }} + proxy_set_header {{ $key }} {{ $value }}; + {{- end }} + + {{ if .Values.nginx.config.basicAuthSecretName -}} + auth_basic "Restricted Content"; + auth_basic_user_file /etc/apache2/.htpasswd; + {{- end }} + + {{- with .Values.nginx.config.serverSnippet }} + {{ tpl . $ | nindent 8 }} + {{- end }} + + location = /healthz { + # auth_basic off is not set here, even when a basic auth directive is + # included in the server block, as Nginx's NGX_HTTP_REWRITE_PHASE + # (point when this return statement is evaluated) comes before the + # NGX_HTTP_ACCESS_PHASE (point when basic auth is evaluated). Thus, + # this return statement returns a response before basic auth is + # evaluated. + return 200 'alive'; + } + + # Distributor Config + location = /ring { + proxy_pass http://{{ template "cortex.fullname" . 
}}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /all_user_stats { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /api/prom/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + ## New Remote write API. Ref: https://cortexmetrics.io/docs/api/#remote-write + location = /api/v1/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + # Alertmanager Config + location ~ /api/prom/alertmanager/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /api/v1/alerts { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /multitenant_alertmanager/status { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + # Ruler Config + location ~ /api/v1/rules { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + location ~ /ruler/ring { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + # Config Config + location ~ /api/prom/configs/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-configs.{{ $rootDomain }}$request_uri; + } + + # Query Config + location ~ /api/prom/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + ## New Query frontend APIs as per https://cortexmetrics.io/docs/api/#querier--query-frontend + location ~ ^{{.Values.config.api.prometheus_http_prefix}}/api/v1/(read|metadata|labels|series|query_range|query) { + proxy_pass http://{{ template "cortex.fullname" . 
}}-query-frontend.{{ $rootDomain }}$request_uri; + } + + location ~ {{.Values.config.api.prometheus_http_prefix}}/api/v1/label/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + {{- if and (.Values.config.auth_enabled) (.Values.nginx.config.auth_orgs) }} + # Auth orgs + {{- range $org := compact .Values.nginx.config.auth_orgs | uniq }} + location = /api/v1/push/{{ $org }} { + proxy_set_header X-Scope-OrgID {{ $org }}; + proxy_pass http://{{ template "cortex.fullname" $ }}-distributor.{{ $rootDomain }}/api/v1/push; + } + {{- end }} + {{- end }} + } + } +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml new file mode 100644 index 0000000..bbd3a9d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml @@ -0,0 +1,111 @@ +{{- if .Values.nginx.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.nginx.annotations | nindent 4 }} +spec: + {{- if not .Values.nginx.autoscaling.enabled }} + replicas: {{ .Values.nginx.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.nginx.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.nginxLabels" . | nindent 8 }} + {{- with .Values.nginx.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/nginx/nginx-config.yaml") . | sha256sum }} + {{- with .Values.nginx.podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.nginx.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName }} + {{- end }} + {{- if .Values.nginx.securityContext.enabled }} + securityContext: {{- omit .Values.nginx.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.nginx.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + {{- if .Values.nginx.extraArgs }} + args: + {{- range $key, $value := .Values.nginx.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.nginx.extraVolumeMounts }} + {{- toYaml .Values.nginx.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + mountPath: /etc/apache2 + readOnly: true + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.nginx.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.nginx.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.nginx.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.nginx.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.nginx.resources | nindent 12 }} + {{- if .Values.nginx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.nginx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.nginx.env }} + env: + {{- toYaml .Values.nginx.env | nindent 12 }} + {{- end }} + {{- if .Values.nginx.extraContainers }} + {{ toYaml 
.Values.nginx.extraContainers | indent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.nginx.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.nginx.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.nginx.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.nginx.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "cortex.fullname" . }}-nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + secret: + defaultMode: 420 + secretName: {{ .Values.nginx.config.basicAuthSecretName }} + {{- end }} + {{- if .Values.nginx.extraVolumes }} + {{- toYaml .Values.nginx.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml new file mode 100644 index 0000000..b93a13d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.nginx.enabled .Values.nginx.autoscaling.enabled }} +{{- with .Values.nginx.autoscaling -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.nginxFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.nginxFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml new file mode 100644 index 0000000..51e6609 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.ingress.enabled .Values.nginx.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} +spec: +{{- if .Values.ingress.ingressClass.enabled }} + ingressClassName: {{ .Values.ingress.ingressClass.name }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + pathType: "Prefix" + backend: + service: + name: {{ include "cortex.nginxFullname" $ }} + port: + number: {{ $.Values.nginx.http_listen_port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml new file mode 100644 index 0000000..959764a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.nginx.enabled) (gt (int .Values.nginx.replicas) 1) (.Values.nginx.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.nginx.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml new file mode 100644 index 0000000..72a2c44 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + {{- with .Values.nginx.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.nginx.service.annotations | nindent 4 }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.nginxSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml new file mode 100644 index 0000000..7bb3983 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: node-exporter + name: node-exporter + name: node-exporter + namespace: imxc +spec: + clusterIP: None + ports: + - name: scrape + port: 9100 + protocol: TCP + selector: + app: node-exporter + type: ClusterIP +--- +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: node-exporter + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: node-exporter +{{- end }} + template: + metadata: + labels: + app: node-exporter + name: node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/node-exporter + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + ports: + - containerPort: 9100 + hostPort: 9100 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - 
--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl new file mode 100644 index 0000000..c0a6204 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl @@ -0,0 +1,23 @@ + +{{/* +querier fullname +*/}} +{{- define "cortex.querierFullname" -}} +{{ include "cortex.fullname" . }}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "cortex.querierLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "cortex.querierSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: querier +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml new file mode 100644 index 0000000..a84ba8a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.querier.annotations | nindent 4 }} +spec: + {{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.querier.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.querierLabels" . | nindent 8 }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.querier.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.querier.priorityClassName }} + priorityClassName: {{ .Values.querier.priorityClassName }} + {{- end }} + {{- if .Values.querier.securityContext.enabled }} + securityContext: {{- omit .Values.querier.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.querier.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: querier + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=querier" + - "-config.file=/etc/cortex/cortex.yaml" + - "-querier.frontend-address={{ template "cortex.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.grpc_listen_port }}" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.querier.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.querier.extraVolumeMounts }} + {{- toYaml .Values.querier.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.querier.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.querier.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.querier.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.querier.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.querier.resources | nindent 12 }} + {{- if .Values.querier.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.querier.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.querier.env }} + {{- toYaml .Values.querier.env | nindent 12 }} + {{- end }} + {{- with .Values.querier.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.querier.extraContainers }} + {{- toYaml .Values.querier.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.querier.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.querier.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.querier.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.querier.extraVolumes }} + {{- toYaml .Values.querier.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml new file mode 100644 index 0000000..f078526 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.querier.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.querierFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.querierLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.querierFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml new file mode 100644 index 0000000..b69de62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.querier.replicas) 1) (.Values.querier.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.querier.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml new file mode 100644 index 0000000..c84d1a4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.querier.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . 
| nindent 4 }} + {{- if .Values.querier.serviceMonitor.additionalLabels }} +{{ toYaml .Values.querier.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.querier.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.querier.serviceMonitor.interval }} + interval: {{ .Values.querier.serviceMonitor.interval }} + {{- end }} + {{- if .Values.querier.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.querier.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.querier.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.querier.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.querier.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.querier.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml new file mode 100644 index 0000000..0701b7d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- with .Values.querier.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.querier.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.querierSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 0000000..c1f74c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,23 @@ + +{{/* +query-frontend fullname +*/}} +{{- define "cortex.queryFrontendFullname" -}} +{{ include "cortex.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "cortex.queryFrontendLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "cortex.queryFrontendSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 0000000..3e31d18 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,107 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . 
| nindent 4 }} + annotations: + {{- toYaml .Values.query_frontend.annotations | nindent 4 }} +spec: + replicas: {{ .Values.query_frontend.replicas }} + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.query_frontend.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 8 }} + {{- with .Values.query_frontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.query_frontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.query_frontend.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.query_frontend.priorityClassName }} + priorityClassName: {{ .Values.query_frontend.priorityClassName }} + {{- end }} + {{- if .Values.query_frontend.securityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.query_frontend.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: query-frontend + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=query-frontend" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.frontend-memcached" . 
| nindent 12 }} + {{- range $key, $value := .Values.query_frontend.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.query_frontend.extraVolumeMounts }} + {{- toYaml .Values.query_frontend.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.query_frontend.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.query_frontend.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.query_frontend.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.query_frontend.resources | nindent 12 }} + {{- if .Values.query_frontend.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.env }} + env: + {{- toYaml .Values.query_frontend.env | nindent 12 }} + {{- end }} + {{- with .Values.query_frontend.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.extraContainers }} + {{- toYaml .Values.query_frontend.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.query_frontend.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.query_frontend.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.query_frontend.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.query_frontend.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.query_frontend.extraVolumes }} + {{- toYaml .Values.query_frontend.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml new file mode 100644 index 0000000..2d76c6b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.query_frontend.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- if .Values.query_frontend.serviceMonitor.additionalLabels }} +{{ toYaml .Values.query_frontend.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.query_frontend.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.query_frontend.serviceMonitor.interval }} + interval: {{ .Values.query_frontend.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query_frontend.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.query_frontend.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml new file mode 100644 index 0000000..939457c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 0000000..85ff2e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml new file mode 100644 index 0000000..5256949 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.query_frontend.replicas) 1) (.Values.query_frontend.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.query_frontend.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 0000000..86270d0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,30 @@ + +{{/* +ruler fullname +*/}} +{{- define "cortex.rulerFullname" -}} +{{ include "cortex.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "cortex.rulerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "cortex.rulerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "cortex.rulerRulesDirName" -}} +rules-{{ . 
| replace "_" "-" | trimSuffix "-" }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml new file mode 100644 index 0000000..8448108 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ruler.enabled }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml new file mode 100644 index 0000000..a8e034d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml @@ -0,0 +1,191 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ruler.annotations | nindent 4 }} +spec: + replicas: {{ .Values.ruler.replicas }} + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ruler.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.rulerLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ruler.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ruler.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ruler.priorityClassName }} + priorityClassName: {{ .Values.ruler.priorityClassName }} + {{- end }} + {{- if .Values.ruler.securityContext.enabled }} + securityContext: {{- omit .Values.ruler.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ruler.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + {{- if .Values.ruler.sidecar.enabled }} + - name: {{ template "cortex.name" . }}-sc-rules + {{- if .Values.ruler.sidecar.image.sha }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}@sha256:{{ .Values.ruler.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.ruler.sidecar.imagePullPolicy }} + env: + {{- if .Values.ruler.sidecar.watchMethod }} + - name: METHOD + value: {{ .Values.ruler.sidecar.watchMethod }} + {{ end }} + - name: LABEL + value: "{{ .Values.ruler.sidecar.label }}" + {{- if .Values.ruler.sidecar.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.ruler.sidecar.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.ruler.sidecar.folder }}{{- with .Values.ruler.sidecar.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.ruler.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.ruler.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.ruler.sidecar.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.ruler.sidecar.searchNamespace }}" + {{- end }} + {{- if .Values.ruler.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.ruler.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.ruler.sidecar.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.ruler.sidecar.folderAnnotation }}" + {{- end }} + resources: + {{- toYaml .Values.ruler.sidecar.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.sidecar.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{- end }} + - name: rules + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ruler" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configs.enabled }} + - "-ruler.configs.url=http://{{ template "cortex.configsFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}" + {{- end }} + {{- if not .Values.config.ruler.alertmanager_url }} + {{- if .Values.config.ruler.enable_alertmanager_discovery }} + - "-ruler.alertmanager-url=http://_http-metrics._tcp.{{ template "cortex.name" . }}-alertmanager-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}/api/prom/alertmanager/" + {{- else }} + - "-ruler.alertmanager-url=http://{{ template "cortex.alertmanagerFullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}/api/prom/alertmanager/" + {{- end }} + {{- end }} + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ruler.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ruler.extraVolumeMounts }} + {{- toYaml .Values.ruler.extraVolumeMounts | nindent 12}} + {{- end }} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{ end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: /data + subPath: {{ .Values.ruler.persistentVolume.subPath }} + - name: tmp + mountPath: /rules + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + mountPath: /etc/cortex/rules/{{ $dir }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.ruler.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.ruler.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.ruler.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ruler.env }} + env: + {{- toYaml .Values.ruler.env | nindent 12 }} + {{- end }} + {{- if .Values.ruler.extraContainers }} + {{- toYaml .Values.ruler.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.ruler.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ruler.affinity | nindent 8 }} + tolerations: + {{- 
toYaml .Values.ruler.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: tmp + emptyDir: {} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + configMap: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + emptyDir: {} + {{- end }} + {{- if .Values.ruler.extraVolumes }} + {{- toYaml .Values.ruler.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml new file mode 100644 index 0000000..52fb3e0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ruler.replicas) 1) (.Values.ruler.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.ruler.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml new file mode 100644 index 0000000..de6744f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ruler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- if .Values.ruler.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ruler.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ruler.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ruler.serviceMonitor.interval }} + interval: {{ .Values.ruler.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ruler.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ruler.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ruler.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ruler.serviceMonitor.extraEndpointSpec }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml new file mode 100644 index 0000000..7752ef4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- with .Values.ruler.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ruler.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.rulerSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml new file mode 100644 index 0000000..2b30599 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml @@ -0,0 +1,18 @@ +{{- with .Values.runtimeconfigmap }} +{{- if .create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" $ }}-runtime-config + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.labels" $ | nindent 4 }} + {{- with .annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: + runtime_config.yaml: | + {{- tpl (toYaml .runtime_config) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml new file mode 100644 index 0000000..9194971 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.configsdb_postgresql.enabled .Values.configsdb_postgresql.auth.password -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }}-postgresql + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + postgresql-password: {{ .Values.configsdb_postgresql.auth.password | b64enc}} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml new file mode 100644 index 0000000..ff0e78f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if (and (not .Values.useExternalConfig) (not .Values.useConfigMap)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: {{ tpl (toYaml .Values.config) . 
| b64enc }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml new file mode 100644 index 0000000..963f866 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl new file mode 100644 index 0000000..3cca867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl @@ -0,0 +1,23 @@ + +{{/* +store-gateway fullname +*/}} +{{- define "cortex.storeGatewayFullname" -}} +{{ include "cortex.fullname" . }}-store-gateway +{{- end }} + +{{/* +store-gateway common labels +*/}} +{{- define "cortex.storeGatewayLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} + +{{/* +store-gateway selector labels +*/}} +{{- define "cortex.storeGatewaySelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: store-gateway +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml new file mode 100644 index 0000000..1019cc8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.store_gateway.replicas) 1) (.Values.store_gateway.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + {{- toYaml .Values.store_gateway.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml new file mode 100644 index 0000000..39eaeda --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.store_gateway.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + {{- if .Values.store_gateway.serviceMonitor.additionalLabels }} +{{ toYaml .Values.store_gateway.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.store_gateway.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.store_gateway.serviceMonitor.interval }} + interval: {{ .Values.store_gateway.serviceMonitor.interval }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.store_gateway.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.store_gateway.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 0000000..0238c75 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.storeGatewayFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.store_gateway.annotations | nindent 4 }} +spec: + replicas: {{ .Values.store_gateway.replicas }} + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.store_gateway.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-store-gateway-headless + {{- if .Values.store_gateway.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.store_gateway.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.store_gateway.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.store_gateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.store_gateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.store_gateway.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{- toYaml .Values.store_gateway.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.store_gateway.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.store_gateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.store_gateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.store_gateway.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.store_gateway.priorityClassName }} + priorityClassName: {{ .Values.store_gateway.priorityClassName }} + {{- end }} + {{- if .Values.store_gateway.securityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.store_gateway.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.store_gateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.store_gateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.store_gateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.store_gateway.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.store_gateway.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.store_gateway.extraVolumes }} + {{- toYaml .Values.store_gateway.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.store_gateway.extraContainers }} + {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} + {{- end }} + - name: store-gateway + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=store-gateway" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.store_gateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.store_gateway.extraVolumeMounts }} + {{- toYaml .Values.store_gateway.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.store_gateway.persistentVolume.subPath }} + subPath: {{ .Values.store_gateway.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.store_gateway.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.store_gateway.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.store_gateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.store_gateway.resources | nindent 12 }} + {{- if .Values.store_gateway.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.store_gateway.env }} + env: + {{- toYaml .Values.store_gateway.env | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 0000000..c56ec77 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,24 @@ +{{- if eq 
.Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 0000000..f58019b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,23 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml new file mode 100644 index 0000000..fc41461 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.fullname" . }}-memberlist + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + name: gossip + targetPort: gossip + selector: + {{- include "cortex.selectorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl new file mode 100644 index 0000000..4798c6d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl @@ -0,0 +1,23 @@ + +{{/* +table-manager fullname +*/}} +{{- define "cortex.tableManagerFullname" -}} +{{ include "cortex.fullname" . }}-table-manager +{{- end }} + +{{/* +table-manager common labels +*/}} +{{- define "cortex.tableManagerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: table-manager +{{- end }} + +{{/* +table-manager selector labels +*/}} +{{- define "cortex.tableManagerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: table-manager +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml new file mode 100644 index 0000000..d24dcc3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml @@ -0,0 +1,106 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.table_manager.annotations | nindent 4 }} +spec: + replicas: {{ .Values.table_manager.replicas }} + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.table_manager.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.tableManagerLabels" . | nindent 8 }} + {{- with .Values.table_manager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.table_manager.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.table_manager.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.table_manager.priorityClassName }} + priorityClassName: {{ .Values.table_manager.priorityClassName }} + {{- end }} + {{- if .Values.table_manager.securityContext.enabled }} + securityContext: {{- omit .Values.table_manager.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.table_manager.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: table-manager + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=table-manager" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.table_manager.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.table_manager.extraVolumeMounts }} + {{- toYaml .Values.table_manager.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.table_manager.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.table_manager.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.table_manager.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.table_manager.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.table_manager.resources | nindent 12 }} + {{- if .Values.table_manager.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.table_manager.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.env }} + env: + {{- toYaml .Values.table_manager.env | nindent 12 }} + {{- end }} + {{- 
if .Values.table_manager.extraContainers }} + {{- toYaml .Values.table_manager.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.table_manager.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.table_manager.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.table_manager.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.table_manager.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.table_manager.extraVolumes }} + {{- toYaml .Values.table_manager.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml new file mode 100644 index 0000000..91adabf --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.table_manager.replicas) 1) (.Values.table_manager.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.table_manager.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml new file mode 100644 index 0000000..9748724 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.table_manager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- if .Values.table_manager.serviceMonitor.additionalLabels }} +{{ toYaml .Values.table_manager.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.table_manager.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.table_manager.serviceMonitor.interval }} + interval: {{ .Values.table_manager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.table_manager.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.table_manager.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.table_manager.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.table_manager.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml new file mode 100644 index 0000000..ff3c57d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml @@ -0,0 +1,23 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- with .Values.table_manager.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.table_manager.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml new file mode 100644 index 0000000..4a0f8c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml @@ -0,0 +1,1605 @@ +image: + #repository: quay.io/cortexproject/cortex + repository: 10.10.31.243:5000/cmoa3/cortex + # -- Allows you to override the cortex version in this chart. Use at your own risk. + #tag: "" + tag: v1.11.0 + pullPolicy: IfNotPresent + + # -- Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: [] + pullSecrets: + - regcred + + +# -- Kubernetes cluster DNS domain +clusterDomain: cluster.local + +tags: + # -- Set to true to enable block storage memcached caching + blocks-storage-memcached: false + +ingress: + enabled: false + ingressClass: + enabled: false + name: "nginx" + annotations: {} + hosts: + - host: chart-example.local + paths: + - / + tls: [] + +serviceAccount: + create: true + name: + annotations: {} + automountServiceAccountToken: true + +useConfigMap: false +useExternalConfig: false +externalConfigSecretName: 'secret-with-config.yaml' +externalConfigVersion: '0' + +config: + auth_enabled: false + api: + prometheus_http_prefix: '/prometheus' + # -- Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs + # which can benefit from compression. + response_compression_enabled: true + ingester: + walconfig: + wal_enabled: true + flush_on_shutdown_with_wal_enabled: true + recover_from_wal: true + lifecycler: + # -- We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. 
+ # It can take a while to have the full picture when using gossip + join_after: 10s + + # -- To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, + # after putting their own tokens into it. This is only useful when using gossip, since multiple + # ingesters joining at the same time can have conflicting tokens if they don't see each other yet. + observe_period: 10s + # -- Duration to sleep for before exiting, to ensure metrics are scraped. + final_sleep: 30s + num_tokens: 512 + ring: + # -- Ingester replication factor per default is 3 + replication_factor: 3 + kvstore: + store: "memberlist" + limits: + # -- Enforce that every sample has a metric name + enforce_metric_name: true + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_lookback: 0s + server: + http_listen_port: 8080 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 10485760 + grpc_server_max_send_msg_size: 10485760 + grpc_server_max_concurrent_streams: 10000 + ingester_client: + grpc_client_config: + max_recv_msg_size: 10485760 + max_send_msg_size: 10485760 + # -- See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config + storage: + engine: blocks + index_queries_cache_config: + memcached: + # -- How long keys stay in the memcache + expiration: 1h + memcached_client: + # -- Maximum time to wait before giving up on memcached requests. + timeout: 1s + blocks_storage: + # custume backend setting related to using s3 + backend: s3 + s3: + bucket_name: cortex-bucket + # -- The S3 bucket endpoint. It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format. 
+ endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + + tsdb: + dir: /data/tsdb + bucket_store: + sync_dir: /data/tsdb-sync + bucket_index: + enabled: true + # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config + store_gateway: + sharding_enabled: false + distributor: + # -- Distribute samples based on all labels, as opposed to solely by user and + # metric name. + shard_by_all_labels: true + pool: + health_check_ingesters: true + memberlist: + bind_port: 7946 + # -- the service name of the memberlist + # if using memberlist discovery + join_members: + - '{{ include "cortex.fullname" $ }}-memberlist' + querier: + active_query_tracker_dir: /data/active-query-tracker + # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all + # queries are sent to ingester. Ingesters by default have no data older than 12 hours, + # so we can safely set this 13 hours + query_ingesters_within: 9h + # -- The time after which a metric should be queried from storage and not just + # ingesters. + query_store_after: 7h + # -- Comma separated list of store-gateway addresses in DNS Service Discovery + # format. This option should is set automatically when using the blocks storage and the + # store-gateway sharding is disabled (when enabled, the store-gateway instances + # form a ring and addresses are picked from the ring). + # @default -- automatic + store_gateway_addresses: |- + {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}} + dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095 + {{- end }} + query_range: + split_queries_by_interval: 24h + align_queries_with_step: true + cache_results: true + results_cache: + cache: + memcached: + expiration: 1h + memcached_client: + timeout: 1s + ruler: + enable_alertmanager_discovery: false + # -- Enable the experimental ruler config api. 
+ alertmanager_url: 'http://alertmanager.imxc/alertmanager' + enable_api: true + # -- Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config + storage: {} + runtime_config: + file: /etc/cortex-runtime-config/runtime_config.yaml + alertmanager: + # -- Enable the experimental alertmanager config api. + enable_api: true + external_url: 'http://alertmanager.imxc/alertmanager' + #external_url: '/api/prom/alertmanager' + # -- Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config + storage: {} + frontend: + log_queries_longer_than: 10s + # S3 사용 관련 커스텀 설정 + alertmanager_storage: + s3: + bucket_name: cortex-alertmanager + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + ruler_storage: + s3: + bucket_name: cortex-ruler + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + +runtimeconfigmap: + # -- If true, a configmap for the `runtime_config` will be created. + # If false, the configmap _must_ exist already on the cluster or pods will fail to create. + create: true + annotations: {} + # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file + # 설정부 + runtime_config: {} +alertmanager: + enabled: true + replicas: 1 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful for using a persistent volume for storing silences between restarts. 
+ enabled: false + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log level (debug, info, warn, error) + extraArgs: {} + # -experimental.alertmanager.enable-api: "true" + # -alertmanager.web.external-url: /alertmanager + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + + persistentVolume: + # -- If true and alertmanager.statefulSet.enabled is true, + # Alertmanager will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Alertmanager data Persistent Volume Claim annotations + annotations: {} + + # -- Alertmanager data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Alertmanager data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Alertmanager data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Alertmanager data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # -- If not set then a PodDisruptionBudget will not be created + podDisruptionBudget: + maxUnavailable: 1 + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 60 + + # -- Init containers to be added to the cortex pod. + initContainers: [] + + # -- Additional containers to be added to the cortex pod. + extraContainers: [] + + # -- Additional volumes to the cortex pod. + extraVolumes: [] + + # -- Extra volume mounts that will be added to the cortex container + extraVolumeMounts: [] + + # -- Additional ports to the cortex services. Useful to expose extra container ports. 
+ extraPorts: [] + + # -- Extra env variables to pass to the cortex container + env: [] + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # -- skipTlsVerify Set to true to skip tls verification for kube api calls + skipTlsVerify: false + enableUniqueFilenames: false + enabled: false + label: cortex_alertmanager + watchMethod: null + labelValue: null + folder: /data + defaultFolderName: null + searchNamespace: null + folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +distributor: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: + -validation.max-label-names-per-series: "45" + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - distributor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the distributor pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 60 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +ingester: + replicas: 3 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful when using WAL + enabled: true + # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details + podManagementPolicy: OrderedReady + + service: + annotations: {} + labels: {} + + serviceAccount: + name: + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - ingester + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 30 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details + policies: + - type: Pods + value: 1 + # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval + periodSeconds: 1800 + # -- uses metrics from the past 1h to make scaleDown decisions + stabilizationWindowSeconds: 3600 + scaleUp: + # -- This default scaleup policy allows adding 1 pod every 30 minutes. + # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + + lifecycle: + # -- The /shutdown preStop hook is recommended as part of the ingester + # scaledown process, but can be removed to optimize rolling restarts in + # instances that will never be scaled down or when using chunks storage + # with WAL disabled. 
+ # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down + preStop: + httpGet: + path: "/ingester/shutdown" + port: http-metrics + + persistentVolume: + # -- If true and ingester.statefulSet.enabled is true, + # Ingester will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: true + + # -- Ingester data Persistent Volume Claim annotations + annotations: {} + + # -- Ingester data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Ingester data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Ingester data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Ingester data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: exem-local-storage + + # -- Startup/liveness probes for ingesters are not recommended. + # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + startupProbe: {} + + # -- Startup/liveness probes for ingesters are not recommended. 
+ # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + livenessProbe: {} + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +ruler: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + #-ruler.configs.url: http://cortex-configs:8080 + #-ruler.alertmanager-url: http://cortex-alertmanager:8080 + -ruler.storage.type: configdb + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + # -- allow configuring rules via configmap. 
ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html + directories: {} + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + enabled: false + # -- label that the configmaps with rules are marked with + label: cortex_rules + watchMethod: null + # -- value of label that the configmaps with rules are set to + labelValue: null + # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) + folder: /tmp/rules + # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead + defaultFolderName: null + # -- If specified, the sidecar will search for rules config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
+ folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +querier: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - querier + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the querier pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +query_frontend: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - query-frontend + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +table_manager: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +configs: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + # -configs.database.migrations-dir: /migrations + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +nginx: + enabled: true + replicas: 2 + http_listen_port: 80 + config: + dnsResolver: coredns.kube-system.svc.cluster.local + # -- ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size: 20M + # -- arbitrary snippet to inject in the http { } section of the nginx config + httpSnippet: "" + # -- arbitrary snippet to inject in the top section of the nginx config + mainSnippet: "" + # -- arbitrary snippet to inject in the server { } section of the nginx config + serverSnippet: "" + setHeaders: {} + # -- (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config + auth_orgs: [] + # -- (optional) Name of basic auth secret. + # In order to use this option, a secret with htpasswd formatted contents at + # the key ".htpasswd" must exist. 
For example: + # + # apiVersion: v1 + # kind: Secret + # metadata: + # name: my-secret + # namespace: + # stringData: + # .htpasswd: | + # user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + # user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + # + # Please note that the use of basic auth will not identify organizations + # the way X-Scope-OrgID does. Thus, the use of basic auth alone will not + # prevent one tenant from viewing the metrics of another. To ensure tenants + # are scoped appropriately, explicitly set the `X-Scope-OrgID` header + # in the nginx config. Example + # setHeaders: + # X-Scope-OrgID: $remote_user + basicAuthSecretName: "" + + image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: 1.21 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: {} + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /healthz + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /healthz + port: http-metrics + readinessProbe: + httpGet: + path: /healthz + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: false + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 10 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the nginx pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + +store_gateway: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - store-gateway + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true Store-gateway will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Store-gateway data Persistent Volume Claim annotations + annotations: {} + + # -- Store-gateway data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Store-gateway data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Store-gateway data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Store-gateway data 
Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +compactor: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - compactor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true compactor will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- compactor data Persistent Volume Claim annotations + annotations: {} + + # -- compactor data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # compactor data Persistent Volume size + size: 2Gi + + # -- Subdirectory of compactor data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- compactor data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +# -- chunk caching for legacy chunk storage engine +memcached: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index read caching for legacy chunk storage engine +memcached-index-read: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index write caching for legacy chunk storage engine +memcached-index-write: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-frontend: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-index: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-metadata: + # enabled/disabled via the tags.blocks-storage-memcached boolean + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +configsdb_postgresql: + enabled: true + uri: postgres://admin@postgres/configs?sslmode=disable + auth: + password: eorbahrhkswp + existing_secret: + name: + key: diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..be38643 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.6.0 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.6.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml new file mode 100644 index 0000000..2631417 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch-headless + labels: + app: elasticsearch +spec: + clusterIP: None + selector: + app: elasticsearch + ports: 
+ - name: transport + port: 9300 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml new file mode 100644 index 0000000..505cc5a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch + labels: + app: elasticsearch +spec: + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 +# nodePort: 30200 +# type: NodePort + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml new file mode 100644 index 0000000..ee0a42d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: imxc + name: elasticsearch-config + labels: + app: elasticsearch +data: +# discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch", "elasticsearch-2.elasticsearch"] +# cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1", "elasticsearch-2"] +# ES_JAVA_OPTS: -Xms8g -Xmx8g + elasticsearch.yml: | + cluster.name: imxc-elasticsearch-cluster + network.host: ${POD_NAME} + discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch"] + cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1"] + xpack.ml.enabled: false + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.client_authentication: required + 
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 + xpack.security.transport.filter.enabled: true + xpack.security.transport.filter.allow: _all + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.keystore.path: http.p12 + node.ml: false + cluster.routing.rebalance.enable: "all" + cluster.routing.allocation.allow_rebalance: "indices_all_active" + cluster.routing.allocation.cluster_concurrent_rebalance: 2 + cluster.routing.allocation.balance.shard: 0.3 + cluster.routing.allocation.balance.index: 0.7 + cluster.routing.allocation.balance.threshold: 1 + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: "85%" + cluster.routing.allocation.disk.watermark.high: "90%" + cluster.routing.allocation.disk.watermark.flood_stage: "95%" + thread_pool.write.queue_size: 1000 + thread_pool.write.size: 2 + ES_JAVA_OPTS: -Xms8g -Xmx8g diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml new file mode 100644 index 0000000..5a53f57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-0 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-1 + 
labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-elasticsearch-cluster-2 +# labels: +# type: local +# app: elasticsearch +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.ELASTICSEARCH_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: elasticsearch-storage +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: +# - {{ .Values.global.ELASTICSEARCH_HOST3 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml new file mode 100644 index 0000000..a4ae2db --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml @@ -0,0 +1,53 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: 
+ type: local + app: elasticsearch +--- +#kind: PersistentVolumeClaim +#apiVersion: v1 +#metadata: +# namespace: imxc +# name: elasticsearch-data-elasticsearch-2 +#spec: +# accessModes: +# - ReadWriteOnce +# volumeMode: Filesystem +# resources: +# requests: +# storage: 30Gi +# storageClassName: elasticsearch-storage +# selector: +# matchLabels: +# type: local +# app: elasticsearch \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml new file mode 100644 index 0000000..2cbd4b8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml @@ -0,0 +1,146 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta1 +{{- end }} +kind: StatefulSet +metadata: + namespace: imxc + name: elasticsearch +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: elasticsearch +{{- end }} + serviceName: elasticsearch + replicas: 2 #3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: elasticsearch + spec: + securityContext: + fsGroup: 1000 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - elasticsearch + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: elastic-node + operator: In + values: + - "true" + initContainers: + - name: init-sysctl + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + #command: ["sysctl", "-w", "vm.max_map_count=262144"] + 
command: ["/bin/sh", "-c"] + args: ["sysctl -w vm.max_map_count=262144; chown -R 1000:1000 /usr/share/elasticsearch/data"] + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + containers: + - name: elasticsearch + resources: + requests: + cpu: 1000m + memory: 16000Mi #32000Mi + limits: + cpu: 2000m + memory: 16000Mi #32000Mi + securityContext: + privileged: true + runAsUser: 1000 + capabilities: + add: + - IPC_LOCK + - SYS_RESOURCE + image: {{ .Values.global.IMXC_IN_REGISTRY }}/elasticsearch:{{ .Values.global.ELASTICSEARCH_VERSION }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ES_JAVA_OPTS + valueFrom: + configMapKeyRef: + name: elasticsearch-config + key: ES_JAVA_OPTS + # log4j patch + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: ELASTIC_USERNAME + value: {{ .Values.global.CMOA_ES_ID }} + - name: ELASTIC_PASSWORD + value: {{ .Values.global.CMOA_ES_PW }} + readinessProbe: + httpGet: + scheme: HTTPS + path: /_cluster/health?local=true + port: 9200 + httpHeaders: + - name: Authorization + # encode base64 by elastic:elastic + value: Basic ZWxhc3RpYzplbGFzdGlj + initialDelaySeconds: 5 + ports: + - containerPort: 9200 + name: es-http + - containerPort: 9300 + name: es-transport + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + - name: elasticsearch-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-cert-certificate + mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12 + subPath: elastic-certificates.p12 + - name: es-cert-ca + mountPath: /usr/share/elasticsearch/config/elastic-stack-ca.p12 + subPath: elastic-stack-ca.p12 + - name: es-cert-http + mountPath: /usr/share/elasticsearch/config/http.p12 + subPath: http.p12 + volumes: + - name: elasticsearch-config + configMap: + name: elasticsearch-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml 
+ - name: es-cert-certificate + secret: + secretName: es-cert + - name: es-cert-ca + secret: + secretName: es-cert + - name: es-cert-http + secret: + secretName: es-cert + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: elasticsearch-storage + resources: + requests: + storage: 10Gi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml new file mode 100644 index 0000000..2a24b92 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRrCrEWs79GCUPrYkFrkDBEF9uz4gIDAMNQBIIEyJUjcP339Anee6bdJls469HbsqYGgzidG41xto7ignNdZdP9LTTca+w8sN8tbVnTUZi4kQYcPSQqv+cWobi66KpgvQ7HhA/YE9K5L7wR7KEj0o61LYvucHm19hRdt788EvBy4mi8cDAr3m49NNuLUM6wyeCEKr2W2dwZFIyxFTPVv6/ef6cuHyDNLXJtjUmOIzNDL8Olqk8JGAd9bwXlizcShfmbiHHX8pAhK0u9JThFQePvCGiKA4LwzeuuwuEniznMlUQ4T/TjLjLLYcoS4vktfOJKPOgL3esjsc5hPoVgbw+ZpNCxRq1RVs/5eOBkxzXhJ7hdNELJDcMjitBfl71MlSDtMV4FhlVuhjilsuHx6URucsEE2l1V3asg4QP1PoSiACqncr2WhCcrKu0d8DztlIkCYG7D8oiAx4nEzsm0xmOhIcigHw6GP4MNeCieJCgAwLkJf1m73IYcxyaKsJAc57jfs9ue62KkVHL2NxNRjTps2j0Cl5NJQRE4CTkieU0etsNS1nJEwiJunVTyHXAa53MF6j40awEqs2Ko4gQENPpuQc599yJb+ZTHfHPe8bpfrmnxiEAaeiABu+OVH9bdLK5gtCyD5vXGZKVtHbyR+0+UlBggw/horFQIP+x7SKO53+ho0iCnYyQK52kJiv93JNgStGHpxf1SkPTtWHOraR2qSZTX6F7vjBtIq3Y6ocb6yo/jMNhzk3spHdz+F99S6uV3NLmDfX2vJmu1YSaPwaNZGDggcFI/g2S5ylBWyHpk2rB5gtklUIQEWxFFvbFOp37ffcdC0mZ6SgpOxj+IxuVLqTvyDLjrfteEvfjRAFXsT8E4XikC8QKjQ+KAwDYETidOiYB0/ByCh7t1KbcKJWU8XYxqzukX88CyVtO9Lp/f97x3ycvaF1UfzLBrm/bnTa0jPEP2/OdzpbjQJcEGX64+QY92k38zjPe4tedUz5H/C9aw8Q8r/DSxUhn2sdDXssR9jytITLLOJHDJX7XC
fZxtoW60bwRm5MyXc4bJmjZT2BgxTWIVokaOhk0IZwpbC/oxh1QkaHBioP6+slASXg8Xu9l+mACevb1b9RvpN+fhurW2wOHl4Kul775BCohuTtiqKAce8KEACwncwYz+ZfcPTkbLRy6+p6NI3zNWpZE+iFlPtLh+2+T/QQHEfKTNUxcXLt8WCMOZuCe776T41nY8UhbUQJKqlEvom3MzCcsvFBoahlpjv+rg9/Ay7ESMil49e2x3qbD2929X0BHz//RcvPO5fvSEK/tC2uHzWzqHf0ZaRwtO19Z95Uv3GjGNF0SO8qri830LfJ+ctjk320qLyZmxA9QgPoI2oMHSxkaX1fgVeiN9coBM8yJbPK8ZdOOg4abnYOhqrTJXaoSFo+SYyAVZoTiQIIk/JScL5Qcw9IJw6sSKmOdChy2spYQKeo1NU9ecLD8YRBqRP0EET7e7NDPKlIWQ1vB5y2hokyL7bxvbGgzqQBAyo9wKJ3v1g4IYEWA9mluvQapOMVEHBYh6wv2nTJpE9EqMxpYQBU1w+vgX0EUgZDEOBkbvd5wubAeERt0mJqjea6vxWJIbeqMVIIoJSZEDaPE5qVNYaosoc8yvAZ9+U3lZlZObHzHEAIUx/2pP/jFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE4MTk0NzgwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFP43u2ii0k7JTUfInMhUBwjWZrS/AgMAw1CAggdItHB4SBc5KdDVc8eXuF8Ex1WP/Y2wz76PoNNpYm2LeIVozsp5c/2RDN2KqhcvhTihlY44esqWWVCOx+OTwmAPFwzZSrMaOYpcOP3fRWaHJLw98cK8a1ZuNv3eXWecf333TrsvU/bpT3v0KNO915qnSbtNwlvXaOMm6jbw6eBnkB7i6jxA7kgVAW6soa3ZHOrV78quBSbAjXZddHsI8x3MS4rxdvkp6GHet22/fQxjxz8UlQEDqzQgK7F4RqULRJeU//JID7VJqfbHRHfnYsKszsirhWKeJsxLVhG1VU/zRgxs0C35NfQeR/o7jmFpE7CCvvC0Rea2pybNojb51HLvyycXtpGn0gAdTBVNnwK1X58uSDWH7jM61uX9f+/gcDZqlUj6UVc6mzqxAgzDtf6B32G0VQq2szaJjbRVEVXhCAOIdVj6pRpI3l3gRv8OkNAWsGwYDMjeFxnrEpw1AQkEj7FRgI6iNOxEfUhOVYIEsflGTUdcd+K+zlCCHAJoMzbqiwPyHHgvLOp04A7fog+H3/cn6Tdmrp/J7TxpaW1ZwwcHtTRLoq0F77Sj8XJule3CzaDtg6IBen/Yo7H9hhK3ORodlGjJYA285dHAd1mtqmHmoWeDNoVrlVyymge78yXGmlFsBWF83VUChRx+9noF3Zhz+QMPBNsKHk4TM9yRHiWpMZIdkEZKq+obCPU2PmC21wnWx13nhb88gaNyBjHxFsGE91SgEyQh/cPhi01Y7+yNYQvYOXJe3EQ6oqFCBkPUnrbAMiHDP//AVN/tUrgVbmpIclfFprP2YIRcfGa7qch48RFbmhnX5N/OYLaPnNYdbxOiwZ0f/KIpDKWS67kS2N+jDKWs/SCLs2g89q1z2EGvbVwKMD6Vl559EZxAfNRv+eZu0MvTejEkuykIHJpXCyP+8EphUyWW9Cqll1ux4rXMUDkgl5sh1WgSoIEASX2j5TJ3fIh0nBkjAkBi0n2BINZgVWKj9U1zHNdRF67Eb+97lUuY6JIkbFhLSgZiIZqnI9bnW8OKUJFtvVtlSKG4xqdOeAroB8GLw2iR/GjF2Dvy4rIZo+qeTCIN+bm+iFkCri7L2K0/KR25h7bAtXwBxwMct5F4A1vltlLs408efMRJ7dg3iqMGhRyXdwxKexWJLbp02uJQ
VU9/ogYeLfSiIZEm25qjEMQZqRpQpwLaH5JB9oLKqdLEdeuxOfqb6weHDOtITlFHToeRNzIEmbiT9gbdpMwKTxs/rtwMHgGU6kIJmIFgnw2gauKvpiIuDCY79JpSNipsicvvLTIa4cc8sZCCllZ1wAmbNDsCH6p0bh8CooMjGf2vUbRClSe9+R19/lRMFGSp4N6fElW7MxNw85xpkFjG0s053fvIJmfPhxVqUHMP3fFQv0DUvvQNvNTsRGdDjohkC0095v9EWy7n9Frv2wIM2G7uVHvrlgkQfPK2JsYZKsUE0KXa4HUQptWL71kp7RQSmOmXFzsthjYVXu/pfXA+u+PAtHvQpo1nTPreXn3UZqiEiQmNkmMPLAYzpIi35tjNewfw5XwDj77pqH5OFcMZDTKbiInV1LuvFlKxCEYh4gvTThC0XTsrsiHgldtNcw9ZB017uPW9AAqbj2IB0d5b0ZB3yMZ67uzt1pretcxmEfSoA64QWOC9lBYp4DVE9QxcCnsSgibWreqpdJHmX5MR4umwIb6WaM1pJdCY1bW4tO3ZVT4DA/4ry7jqxUH4AcZRNK0zYR6DAtZndB7LTJhT+8d5EBtmAHzC5HT9KLmHV6mAG1QLMlwhNXmtM0YCJsKxcZo+xLBy/2cHl41EU4ACiuEq1JrM5j9fQk+hmJHT+JB0aqv+kvdxGmgBuVWGHQBtNTV6TYeLzqzDpIl9uXi3qFKFBuTQOska2zAMv7gLOe79w1cVb/SJKdcYjWtLR0v6wfaRgVeBwLvTvh7nNXhXRqKfQKe3e2Tjgq4nV4kOQHI21WDKGSd4ONyyvXGMwNzRgcZwpDFAcvshZATwaBtAo4JWi6D3vJB6H1PHRtyqHjErKkPazoZMjR2sZI8S4BMo4R5fa1ZztZO4p2lJYUIAQHj872UdGXHTXgyZKU8t/ifiVfxon5UtZJRi0Xq5OMdN//Qtq2kVwQxntf0eWsygkKMtNr1XLzu0TAMUMItnohdQWUw5w8UeXYOAYfZFqZEhKfcwkJsfq1q56ptzVBI3T2hDFM7xuVFNn5y+FCTx9pB9FCbln/3ZlKuUiTH/eLMKdQYGkRX4X0qzkx3YqAn6jDLQPEG3Rz0JP53T43uLxGpqa8+jn1XIUCNj50mqZGiah7bdo1qsDHbFWYCe7uoOjPapontpaoEQaZog1INqBNerS19a+i4S0/uAsGApykwUhk/zGfr9UudpKJWd7AznlF3+yfZfk/9mCSajBpoWafCIWmOvxJD77L86YAs9STuhWUGQvL2rxPf2uyS4WAi2+DgbdrGTSiwNB/1YX8iHp/cw6DA+MCEwCQYFKw4DAhoFAAQUSvLiFrAQlmfgL3Cewez5Fw2+0okEFH+RyXvcJHVaYbaqjejrXkgUS0JsAgMBhqA= + elastic-stack-ca.p12: 
MIIJ2wIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCBWEGCSqGSIb3DQEHAaCCBVIEggVOMIIFSjCCBUYGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBTQSr5nf5M77CSAHwj38PF//hiFVgIDAMNQBIIEyBrOipz1FxDRF9VG/4bMmue7Dt+Qm37ySQ/ZfV3hFTg6xwjEcHje6hvhzQtFeWppCvd4+7U/MG8G5xL0vfV5GzX1RhVlpgYRfClqMZo3URqBNu6Y5t3sum+X37zbXQ1GI6wo3YURStZkDHlVtObZB667qqj5rO4fIajzRalaxTFda8aS2xAmQklMcCEXASsO5j0+ufVKiOiG2SIEV2LjjYlUymP7d9+LAZ2I6vR+k/jo2oNoPeq0v68qFd9aOB2ojI9Q/PDFA7Nj1kKMK7KjpxGN5/Ocfr8qrxF1mviA6rPdl8GV3WCFMFKcJER4fRmskWGNE/AdwU3laXvJux/qz4rjiYoJX+5rSyXBDxdznaFiSyN1LYkFJ+nao6HSAmPPyfEPVPRICc6XHMUM4BZOVlJO49M1xg7NFQUtkyVm8+ooDwXCiGEUHDZNw+hCcuUewp0ZXki695D0tESnzi3BE56w7CRySeaNR8psAtL74IUtov9I66GlBEI7HSbyLTT9Fa7+o+ElJWnFqIyW8WzNF3T5fvRv2LfKjYO5KiISlOM03KlETWE1F60TZqW3EbP9WjLhRnovFcJVsNyha+wDVTu44DAylMX4Oh2xKYm2YW+Oi0aeCFmJbDp/TlxYhm5ACYUxma6CVxbEgHkxwjWyFfiNQp2MBL/5HFJGxuny2lVnN8yUSCvDdnOlVTB36/EByY/oA8S+GF/QRYd3PMew56s7aBgPt8mhncN5Cdm+GCD/Nb/ibcuTId9HAaT6o3wMsc7bYusjHGCjFbz9fEdU2MdpLJO+FXVM9E1sEKoTpPLeJDh2a9RUWJQPUCLu8MgEdiJohtEpOtvM7y5+XbuAkYaDsBw3ym5M/kwovN09X1m5x5qM0QSRIVKHf1qo6wo68VMeVQDEBNxJ5/tuZ11qE3siGRfwDnUkCpb9H54+w3zaScPHGAdwplYYwaqnFMwi8nFMtjZvGOLT2wqPLPnKVeQGt4TCVWPXuB4kYnmbTWoJbUT5Wpurcnyn8l6uzLmypCD4k8YiQoDb1b9HIFUAypn580KIUF19eCSGeIHl4hbmusuISxQ1qXk7Ijbj7PiVtMKy5h8rG/c57KJvfvnMQy9hauM5kcZmlTUvrHDw+7cUFB96/wXbvqmcPKGKutgXRqHcTYyBOPEJnSUMBIM2r59wgFjlMuQLrJurzwzox/IEKu/KMilIBDp4k+MHz6NrINWfbV7xa6yAja1kWyvUmwYjCHhlXZmhCb2fmhP1lsnN4BNAkDsdfxHBRCBISy6fuHSY+c4RsokxZ4RomHhVvJsEY/AE4DCvVXDunY8t4ARrQCqXYso3+kVjm6+aelKk+KgyLZ3St0eAIl/Y2xqEXgh0wHGrx3CLZqGqq864f5MmrxiytmlSzHP4RSad20drsN3VchaJZkyrGbKEs6ZJDU2dq5NiC5unqx5tLw6XNRTydIC2PaiVl9m3GLUCh6hQSRJnvcXrqOd8a9K1uV5OoA3TRdc2V5lyxWRIJsdK5KfiAiTsNeM+Tt+Dh2pZjt2l2h4n4BjgYApxG8u10BP1iZ1e1OsCRgLGbgiuXtXrlrjwvJzrB5i11oy9mt3vqgtbjAciQpsQYGGfnVqyGXfEc55hIYWClNAFZDE4MBMGCSqGSIb3DQEJFDEGHgQAYwBhMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE3OTU1MTUwggQUBgkqhkiG9w0BBwagggQFMIIEAQIBADCCA/oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFEVjuzIvhFF9BzWGr3Ee4cw/mLcqAgMAw1CA
ggPAwroH+zLRt2Jtb8IWeOaIbXAv4sVGUljreWkJE8dkoXNcEQpATEt5H7L4uwnDsevLi1yfWtUDN1OxM8gb7iR4Jysrd+8uM1r0nn9YStz/I3qhN9Fb6yAb+ENTCzwo/oAnyDBM/lXR9fL0EPHRfsDmK+6kC+hZ4AZIao+1oWRD0Bu970yK6gwv7TIRCsS/RBZfC/d4Slz1+IQChiWS4ttTzxK/IuhaFbia0JYtUpjmMGMBQwYRyvITgYpOIct39Il/mabQ4BA1/wk7Oecfe3RHzIfM49AxJtwKppfVfaRJjtK1aoO/GKS6CZuvIIX8q3Mt32OEaoRN9FJM9EkUkKCcYhtRfq0/8MTO97MbrcKeO8XICn8vZwOMM7k7IFtCq44/3QBXa9fpc2BFMVYOoQ22W2ZuMNMRp6OYc6Da1BG4Ik9mt1T4k9NkvfrhpNceR27v6Q0pZNUTN26aPr11/SfS/IZmLGXF7cGAfxITMOQwK2ig6qivXzvwLxfnyW4aHF7K/jL59kDg9Vf9zKmlvPJpHSEWv53U9SFYvvrMISd6E8np0bHRM5p49mgH/KXGauRRaLWUxlBwrhjeZRimTF9x//a0luGf5tIW8ymi32wn8LNiu7fbnkldnivfgWVmktNrPMH+70HNlCWkfaNibSHpzyDQRTzg9PjHEcFH+pQAXCc+A8y8FSvlT+nx9dpXXRK5pqbrGnWyrm5D3oY1ceO0E85R9Fx4Ss0f+mMBtNDYpz7zS5BSX36MNn0gm6MkhlOVbbcAob4WbZAEM7zaiV1ilLegXPZYPCGQydN02Q+lJ7HHZ18T4mzTrjF6M1PFIx31cR1r0ZtJhkCrOWdlTrmovvYYEgEStsiE3pi6dW4v1NgcJVevpnJJ//vpGXasH9Ue/ZNdk1tj/h7cQ/qbKlmvrcuH/UQ969RsNX+K3B1xeYnfbV88BXqFLuqhuWy38wwvBvKO37vq+ioPNIjwaIyCVzoF9/MAx2aNOdk/x04mSNVYh5q0ZKv+3JC3W2vJxV2aonc/ybFgi2GZz2erVYNZTSXz+bEefx8QWzcW6/zr437jh/peQRyQ92PsN+eZV9GB2lrwmF7K2579vNQoVcpzTvTFf+eZZhF8u/1HZW4uFHRUyqE3rHyOukSFukD7XWnFL1yUcWw/SGNIm1HNZD3nXjqcwdAIXl7OvqdO0z/Qt2bny6KpOSJqjMUjB5AX5/yt2xlZBDhlsoGtRfbSWefGf7qTdpg2T9+ClMb7vS1dLzrGRzNgGc7KO2IQdkNcfj+1MD4wITAJBgUrDgMCGgUABBSoZ3hv7XnZag72Gq3IDQUfHtup5gQUHZH4AQTUUCeOS0WnPOdFYNvm1KUCAwGGoA== + http.p12: 
MIINZwIBAzCCDSAGCSqGSIb3DQEHAaCCDREEgg0NMIINCTCCBWUGCSqGSIb3DQEHAaCCBVYEggVSMIIFTjCCBUoGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRl7KAO2Y5ZolA3Si0i+pNdXpn42AIDAMNQBIIEyE9fBFRMMy358/KJQcAD9Ts0Xs0TR0UEl/an+IaNTz/9doU6Es6P22roJUK8j4l09I8ptGGKYdeGzrVBzWEjPhGAZ3EXZPHi2Sr/QKbaiWUnYvqqbPVoWNLukrPvK5NpEyPO2ulfxXN46wHzQMnk5l+BjR4wzqKquxgSzacXRJCqznVj59shjLoTK9FtJ3KVEl+JfukcAh/3EqkP7PRAXrPeQ5UcvYbYMZgxw8xHYg/sdKqyHBxwQqNtvGlfGHQ6jyb4/CS2vu0ZehGHQoMgmry2pvNMjA9ypSVWRGspcrdcQOJNgYtHmBiBScoURLB+9KJX2ivY8zJFI5e8Hb48sLASkp4HQemBWMQTukSnlgddsAtIKgpoRZWpcJ7PunHuWXAKZPCMH6uF14G71/lhluRjjy5GEnkKhKkKnlX15kmLmylTZJVdMbMRnsGK7exsVS8ot7sYJ9EMIvKJUqKf/RmZvUxZqlGp1oy3Uo5JgBU5MF61wnkad+L1UJsB2ZzPV0S/jYKPFVzBsWXj9IH74D02TcQz774+FQqAXlVLlpglmlnMwOU3IboKOH2Z4LIj7Kx7wfZZMi3/sQbYJM2PWCd8OS/keDf53ZwMKNxWPh1ZB7kX4mqhmMHdNgRblcWXP3LtWKck31Vq1UdGfK4/T/nudD1ve15NPUP1DvcVsDOWnRF4s3IDXZwXWqvag+hz0zVyB/T0X1XkqrPtBNX/o5qeTDP30W2GVdGL6SIlgZHaqqNuamHlhGra43ExKTwRPBsskTrziC2fb/JeqXxJBES/YufiomXw14BnQUpyBfVeV3cDDEZUnfu7lJz19jS+2aTtA6v9Qnps+q0rNnLa54JLf9bWlw4RomSWcJCqkkW/EG0AdTKrqNFYPZVZTLvt+4B8ehWrUWas8MK5jAXeTklr0ao5acGOKWip1wmqIRKRAIT2OBbs9jCmigb2xJNDK4RdUtDYsJeltJ69DvnG7bmTLjfsOQcVIaI40k91N8nnda9+/6BdKFDQtMDB6efGkciWp9ce24uGUzKszD7CmKTlCJiqn/V2bbOKGdk4Tafy4B2HzeaX+fMFjpWu01UMaJJrvYbAnXww1Yg2IjbwdAMTv7z8zPIJ0a+drouylUfvKKeun6BnLe0fR+XbRRs77Rengb30c1plozEFHZjzmQ10uVQSh1wWURJnVSru6b1pyVI+KR3WZHB4vgDx+BDlQjxCk53+Hxm5wv8SgpvNxVkepPVF8ucut9FkGNHov1gyatlEKSzYlrFt0mFQWg20rKMrkB6pEDO8f5W2InR3znO15NTbw/l3BXYGOe1lS0tHljc5zJkmMTdVrJnFEd2RqNPNmFWEn+1bm4NeAr6QEY9fiyBCMWBHEELTfHtu4iS37D1cBEKudpCszaWJiPgEeDu75+IuXa/guZdxWJj/ktDfZQJpp9ork2QScgu31l7QdGfC24C2E6kQp4UHZ3k7wXSTUt61bdmK7BHqjiz3HuP76phzd7nZxwLCpEg8fhtwhNgPx3IrU1B4JX40Wzsy1Tz/8oIcvjykDmI967chWtw/WSschamGBelNt+TV1gVKoLlMpL9QxFcAqXhEC6Nr9nXRZRJAIRun3Vj+EabZoR2YsdghDE9boTE8MBcGCSqGSIb3DQEJFDEKHggAaAB0AHQAcDAhBgkqhkiG9w0BCRUxFAQSVGltZSAxNjUzOTcyMDczODY4MIIHnAYJKoZIhvcNAQcGoIIHjTCCB4kCAQAwggeCBgkqhkiG9w0BBwEwKQYKKoZIhvcNAQwBBjAbBBRmhTM5a6OsdDd4LLR/07U/28/dqgID
AMNQgIIHSCCLUDdxl9rcX65CAYiQD1mrnoDJe+c8hWww8KI+RD1/3U8skUZ+NHjf2cjCrDQdtVZcycc37lkJ4HEU0keMdVE7I9tja81EfQclnZAUgx/zzLQqVV9qc1AcKX0pzUczLewoQZdXQHdpXh0u8Hf4xFeYM3EAGxB0mUYGwZXWSxYSdaHmxTgeftqNHF6tudt0vpPgq9Rbqp7zP8z48VUOSUkbNTXZOgNVpMgs/yKivvURdWBwJMkpOs/daeR+QbOLkhrhTtT8FjwFUlpnQ//8i7UsBBJKcEKvlrfBEDWcIGw8M6oAssoPsCGyXnsP7ZCVBDBgv941mBTJ9Z9vMoKPpr9jZzSVJrU2+DDuxkfSy1KL0vUvZm5PGSiZA72OpRZkNi8ZUbJTRKf71R+hsCtX/ZUQtMlGCX50XUEQl44cvyX32XQb2VlyGvWu0rqgEVS+QZbuWJoZBZAedhzHvnfGiIsnn2PhRyKBvALyGcWAgK0XvC26WF676g2oMk8sjBrp8saPDvMXj06XmD6746i5KC52gLiRAcwlT4zJoA0OB5jYgxXv+/GP9iXNIK578cCGpBes28b7R+hLDBCc/fMv1jMhKWPVXWJZ6VkcpUgH73uxFl43guTZzJfHI1kMF1+PbOviWPdlSj1D44ajloMJP5FXubIfYEIqV19BdU42ZXZ8ISIZYTAj9OhNCUkkTjjGH2VhFz/FjZDxdk9m/Sw+du8dg1v0+6XIMScjuutbLxxol8Dx1yfRSgZZGN+D3vi0hW1OgcpnUhVI/x48LjdWm1IA0XWOzFiJAe98BiL0roTsUk0pgyujzvLcwDFGP9hnQ0YLdCy22UsQ39hRyQzwGAVO8O49bU8sgNy75+4++8Z3pqI91hdoHyzNMSx6fJn/Qd6UcAdTF0divh17q5bZi+x3D7AQEvh5NwePD0HIqBZexT0yNTVTHragJZUetI5FZgE1cZrfchckP/Ub5jdn3e/Cvu8J/yZFAM8glJvO1D+4BZ+/MVAw3AkO7kLhGeXMXr9s9+A/uPlznoC6b9bpjj3X46bFz7dPIYC0aeya87vISA0/5VPkkUZ+U6A9nLkCIcl5XQElMjrzidFJyBmtxHXLrAu5yiWorl3KVOf9QOrKrZt1UrNihIaSIq/46jI5yBQX6LV7fUBrZKe/oMbuf6W0LliNJbKSwZi0RRHo0jBPotUiOsn1qmnh+hZp6rwi1KGOsCAPSMSGnURwoXAdTUmAyPriDjDBKjm2EiDZJ9T3XgNDHVU24SqKjsSoByrD4FcVyqFAl3w0CaSNXloZswE0UqGKoQUy6Up0ceWoeHYfA/FJyaGfkFGRkmYun+wUJZvhpoLv6bn377CziWTSc0o3nl+UZ4pTsRJOlG0FOxzWApjSd8bPIdezPxak2DM0qj6aiUocfEBMLnFn4Sjj1vVFmIGPNXiOPlJF0Ef99I5Gno3YAd4ZHBqpkeUq7+bWur+xhv5zsXs5ARK6TVOVqlMPiKRpDX7lEQoya++U6HIj6zb7arSZivM5YrZeqHFKK4gpORvpg6icApQCBniDgmNxZJFobgzvIwKTABJjoivHs4zIIw6TCjbz38GEFdzbsUuCXQo3tFWaxgiGkxtLnjYr0PTIxFdBfQ5dkRkkxLvUg7uR1uP9IcmO/8QzzyLeSA+I+teZME8QCzui6CY/lhIfjxJimawejCJx33nS9uXNibQ0my41SmXRDGVgiH6el8veIbEHU9RY+elVR6eqlemCuIHfU8QNPNbe7Gzqaaoccd2VUY3PXNHxU87DC7Nttvn99Ow5zxZ8xZUQVfLFntS9d2hgKp8gJ9lgVKzEuYCiL59wuxbNtnAb8mET0Buw24JeQew9e8DdYL2vDLhQz+IqPXKAhlf7BSpPyQTOeaba657CNmkzdiNk3RHGeTRrq4c3/nl1M+ZsPwf8WxoTcmu+W0Y7/j9nps8r+fKlNB23hOEIWZ4KN+Y4qZRKltTARhqmdjLI
hUtWh4D49eTe5sS3MqzsZJJwsEHPPOvZKvOG5UU3jXMg9R4F8CaYgx/M4ClwIIlHvcdW7R7sXke9E/qccIG3jQ5b/mgHCk3pVkAyrRWfBZqXxlfWn+cfzVALtUXWePwhN8+i3CQbjLLOgE6yH3/rBfXQQVYHwrZqoyFchDwlFF5FtF5GThnj04kvhZbq0EcF4lbiULAOiBkJong4Op287QYgq4W8szOn9F2m/4M2XNaI3X7w67GADFHs5TtPXjWx1l6kKIwMM2pcpltXblqgH087payQHx1LnCpztxcxmeoFb3owvwKWmQpV0Gh6CIKfa7hqwCsNggOcKEQWwRJtADEXzPhRYG0mPelWLQMdLLaEzUqh9HElXu3awKazlHa1HkV0nywgldm23DPCKj5Fi6hux7vl7vt8K0Q4KA8Xoys4Pw43eRi9puQM3jOJgxX8Q/MsABHHxPBa94bOsRLFUa/Td70xbHpOrCCp64M7cm6kDKAwPjAhMAkGBSsOAwIaBQAEFEi1rtKgyohIpB9yF4t2L1CpwF+ABBSDiyukmk2pIV5XfqW5AtbEC9LvtQIDAYag +kind: Secret +metadata: + creationTimestamp: null + name: es-cert + namespace: imxc diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml new file mode 100644 index 0000000..d2bff8e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..7b0bd6d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml @@ -0,0 +1,68 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml new file mode 100644 index 0000000..61a7b7f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-manager +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml new file mode 100644 index 0000000..b20900d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-manager + namespace: imxc +spec: + type: NodePort + ports: + - protocol: TCP + port: 80 + nodePort : 32090 + targetPort: 80 + selector: + app: kafka-manager diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml new file mode 100644 index 0000000..4edcf32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-manager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-manager + template: + metadata: + labels: + app: kafka-manager + spec: + containers: + 
- name: kafka-manager + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-manager:{{ .Values.global.KAFKA_MANAGER_VERSION }} + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 200m + memory: 1000Mi + ports: + - containerPort: 80 + env: + - name: ZK_HOSTS + value: zookeeper:2181 + command: + - ./bin/kafka-manager + - -Dhttp.port=80 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml new file mode 100644 index 0000000..b5532cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? 
-ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? 
-eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + 
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + 
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + 
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..8ffb3f8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: imxc +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: imxc +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml new file mode 100644 index 0000000..6f67ab4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-1 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-2 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +# On-prem/워커노드 두개/브로커 두개 환경에서 발생할 수 있는 affinity 충돌때문에 주석처리 +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-kafka-cluster-3 +# labels: +# type: local +# app: kafka +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.IMXC_KAFKA_PV_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: kafka-broker +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: + # - {{ .Values.global.IMXC_KAFKA_HOST3 }} diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..1982584 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: imxc +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 2 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + annotations: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 6000Mi + limits: + # This limit was intentionally set low as a 
reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 500m + memory: 10000Mi + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "rm -rf /var/lib/kafka/data/*;kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] +# readinessProbe: +# tcpSocket: +# port: 9092 +# timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: kafka-broker + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..c2d8170 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,89 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + type: ClusterIP +--- +kind: 
Service +apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9094 + port: 32401 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + selector: + app: kafka diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml new file mode 100644 index 0000000..cb0e677 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml new file mode 100644 index 0000000..d602e29 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: postgres +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml new file mode 100644 index 0000000..95c8bda --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + namespace: imxc + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: admin + POSTGRES_PASSWORD: eorbahrhkswp diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml new file mode 100644 index 0000000..dfbd714 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml @@ -0,0 +1,38 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "{{ .Values.global.IMXC_POSTGRES_PV_PATH }}" + nodeAffinity: + 
required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + namespace: imxc + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml new file mode 100644 index 0000000..31e90a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: imxc + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + # nodePort: 5432 + selector: + app: postgres diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml new file mode 100644 index 0000000..14993e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml @@ -0,0 +1,45 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: postgres + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: postgres +{{- end }} + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.global.IMXC_IN_REGISTRY }}/postgres:{{ .Values.global.POSTGRES_VERSION }} + 
resources: + requests: + cpu: 100m + memory: 2000Mi + limits: + cpu: 300m + memory: 2000Mi + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + args: ["-c","max_connections=1000","-c","shared_buffers=512MB","-c","deadlock_timeout=5s","-c","statement_timeout=15s","-c","idle_in_transaction_session_timeout=60s"] + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml new file mode 100644 index 0000000..9972ab8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml @@ -0,0 +1,68 @@ +# Default values for postgres. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock new file mode 100644 index 0000000..21ff14f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.8.0 +digest: sha256:3e342a25057f87853e52d83e1d14e6d8727c15fd85aaae22e7594489cc129f15 +generated: "2021-08-09T15:49:41.56962208Z" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..3b08f9c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.22 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source message broker software that implements the Advanced Message + Queuing Protocol (AMQP) +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.20.5 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md new file 
mode 100644 index 0000000..9b26b09 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md @@ -0,0 +1,566 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | ---------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.8.21-debian-10-r13` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the 
deployment | `[]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `[]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. 
Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. 
| `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.servers` | List of LDAP servers hostnames | `[]` | +| `ldap.port` | LDAP servers port | `389` | +| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `{}` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | +| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | +| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain 
scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| ----------------------- | --------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------- | --------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.volumes` | Additional volumes without creating PVC | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.port` | Amqp port | `5672` | +| `service.portName` | Amqp service port name | `amqp` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` | +| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.distPortName` | Erlang distribution service port name | `dist` | +| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` | +| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` | +| `service.metricsPort` | RabbitMQ Prometheus metrics port | `9419` | +| `service.metricsPortName` | RabbitMQ Prometheus metrics service port name | `metrics` | +| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` | +| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` | +| `service.epmdPortName` | EPMD Discovery service port name | `epmd` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. 
Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.certManager` | Set this to true in order to add the corresponding annotations for cert-manager | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
| `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource 
should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r172` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. 
+ +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. + +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enabled` to `true`. Any load definitions specified will be available within the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. 
An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. 
If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. 
When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. 
+ - `metrics.port` is renamed to `service.metricsPort`. + - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. 
+ +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..344c403 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.8.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.8.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..054e51f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md @@ -0,0 +1,327 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` 
Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default 
(dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..ae45d5e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for policy. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..f905f20 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. 
Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..60b84a7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. 
+ - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum 
$passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. 
+Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. 
+ +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..1e5bba9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" 
.secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..18d9813 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (not $existingSecretValue) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..24ffa89 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,167 @@ +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }} + +To access for outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "rabbitmq.validateValues" . -}} + +{{- $requiredPassword := list -}} +{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..6b46b23 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . 
-}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
+*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers", + "ldap.port", and "ldap. user_dn_pattern" are mandatory. 
Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]="lmy-ldap-server" \ + --set ldap.port="389" \ + --set user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Altenatively, user an absolute value for the memory memory high watermark : + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not .Values.ingress.certManager) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Relay on cert-manager to create it by setting `ingress.certManager=true` + - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. 
+ Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..5ba6b72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration}} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..db74e50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..bf06b66 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml new file mode 100644 index 0000000..d0f8bdd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml @@ -0,0 +1,22 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: rabbitmq-pv + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: {{ .Values.global.RABBITMQ_PATH }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml new file mode 100644 index 0000000..c677752 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: rabbitmq-pvc + namespace: imxc + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . 
}} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . 
}}-endpoint-reader +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..4d14e4e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | 
nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..562fde9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +secrets: + - name: {{ include "rabbitmq.fullname" . }} +{{- end }} + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..46b9040 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..45abd14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + 
value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if .Values.loadDefinition.enabled }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "yes" + - name: RABBITMQ_SECURE_PASSWORD + value: "no" + {{- else }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- end }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + - name: stomp + containerPort: 61613 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) 
| nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + postStart: + exec: + command: + - /bin/bash + - -ec + - | + until rabbitmqctl cluster_status >/dev/null; do + echo "Waiting for cluster readiness..." + sleep 5 + done + rabbitmq-queues rebalance "all" + {{- end }} + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + resources: + requests: + memory: "500Mi" + cpu: "150m" + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..4ed26cc --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.portName }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..2b4c224 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.epmdNodePort))) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.distNodePort))) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.managerNodePort))) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.metricsNodePort))) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..b6a6078 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,74 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . )}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }}-certs + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "rabbitmq.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": 
false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..5b74e6c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml @@ -0,0 +1,1151 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +## @section RabbitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/
+## @param image.registry RabbitMQ image registry +## @param image.repository RabbitMQ image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: rabbitmq + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + +## @section Common parameters + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param 
diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "eorbahrhkswp" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "pf6t82zTrqY9iaupUmkPOJxPXjmjiNEd" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify 
Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_stomp" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. 
+## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: false + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## +configuration: |- + {{- if not .Values.loadDefinition.enabled -}} + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = eorbahrhkswp + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. 
+## advancedConfiguration: |- +## [{ +## rabbitmq_auth_backend_ldap, +## [{ +## ssl_options, +## [{ +## verify, verify_none +## }, { +## fail_if_no_peer_cert, +## false +## }] +## ]} +## }]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.servers List of LDAP servers hostnames + ## + servers: [] + ## @param ldap.port LDAP servers port + ## + port: "389" + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter + ## + enabled: false + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] + +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. 
+## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param 
customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + 
create: true + +## @section Persistence parameters + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "rabbitmq" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "rabbitmq-pvc" + + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 5Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + +## @section Exposure parameters + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + # type: NodePort + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## + managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheus metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheus metrics service port name + ## + metricsPortName: metrics + + ##
@param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: + - name: stomp + port: 61613 + targetPort: 61613 + #nodePort: 31613 + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Ingress annotations + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Relay on cert-manager to create it by setting `ingress.certManager=true` + ## - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## @param ingress.certManager Set this to true in order to add the corresponding annotations for cert-manager + ## to generate a TLS secret for the ingress record + ## + certManager: false + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by 
Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param 
metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Specify Metric Relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + 
additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: 10.10.31.243:5000/cmoa3 # docker.io + repository: bitnami-shell # bitnami/bitnami-shell + tag: 10-debian-10-r175 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## 
Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock new file mode 100644 index 0000000..ee0ecb7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.3.3 +digest: sha256:264db18c8d0962b5c4340840f62306f45fe8d2c1c8999dd41c0f2d62fc93a220 +generated: "2021-01-15T00:05:10.125742807Z" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml new file mode 100644 index 0000000..6924d59 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.7.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md new file mode 100644 index 0000000..3befa8c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md @@ -0,0 +1,707 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. +While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable 
NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | Additional 
Environment Variables Secret passed to the master's stateful set | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | +| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the slave's stateful set | `[]` | +| `slave.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | +| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` | 
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} | +| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| 
`sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.preExecCmds` | Text to inset into the startup script immediately prior to `sentinel.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change RedisTM version + +To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - RedisTM Master service: Points to the master, where read-write operations can be performed + - RedisTM Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed: + + - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar): + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for RedisTM you need to create a secret containing the password. + +> *NOTE*: It is important that the file with the password is called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. +- `tls.certCAFilename`: CA Certificate filename. No defaults. 
+ +For example: + +First, create the secret with the certificate files: + +```console +kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +``` + +Then, use the following parameters: + +```console +tls.enabled="true" +tls.certificatesSecret="certificates-tls-secret" +tls.certFilename="cert.pem" +tls.certKeyFilename="cert.key" +tls.certCAFilename="ca.pem" +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS options to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example: + +You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or provide the following values under `metrics.extraArgs` for TLS client authentication: + +```console +tls-client-key-file +tls-client-cert-file +tls-ca-cert-file +``` + +### Host Kernel Settings + +RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. 
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: + +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis +``` + +## Backup and restore + +### Backup + +To perform a backup you will need to connect to one of the nodes and execute: + +```bash +$ kubectl exec -it my-redis-master-0 bash + +$ redis-cli +127.0.0.1:6379> auth your_current_redis_password +OK +127.0.0.1:6379> save +OK +``` + +Then you will need to get the created dump file from the redis node: + +```bash +$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis +``` + +### Restore + +To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. + +Follow these steps: + +- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`; if it is already `no` you can skip this step. 
+ +```yaml +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly no + # Disable RDB persistence, AOF persistence already enabled. + save "" +``` + +- Start the new cluster to create the PVCs. + +For example: + +```bash +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +- Now that the PVCs were created, stop it and copy the `dump.rdb` onto the persisted data by using a helper pod. + +``` +$ helm delete new-redis + +$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides=' +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "redisvolpod" + }, + "spec": { + "containers": [{ + "command": [ + "tail", + "-f", + "/dev/null" + ], + "image": "bitnami/minideb", + "name": "mycontainer", + "volumeMounts": [{ + "mountPath": "/mnt", + "name": "redisdata" + }] + }], + "restartPolicy": "Never", + "volumes": [{ + "name": "redisdata", + "persistentVolumeClaim": { + "claimName": "redis-data-new-redis-master-0" + } + }] + } +}' --image="bitnami/minideb" + +$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb +$ kubectl delete pod volpod +``` + +- Start the cluster again: + +``` +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +## NetworkPolicy + +To enable network policy for RedisTM, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to RedisTM. 
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 11.0.0 + +When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml` + +### To 9.0.0 + +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the RedisTM exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### To 7.0.0 + +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. 
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..ceb5648 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.3.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.3.3 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md new file mode 100644 index 0000000..461fdc9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md @@ -0,0 +1,316 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. 
If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| 
`common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} 
+ topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..d95b569 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret 
Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..622ef50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4931d94 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..77bcc2b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a786188 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..3e2a47c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..fb2fe60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..7efeda3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## 
+image: + registry: 10.10.31.243:5000 # docker.io + repository: redis # bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-sentinel # bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-exporter # bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..a254f58 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + 
+------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.imxc.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . 
}}-slave.imxc.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace imxc {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace imxc /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace imxc /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace imxc /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace imxc -- bash +{{- else }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. 
Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. 
+{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . 
}} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..193105d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl @@ -0,0 +1,421 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..02411c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. 
The liveness check will then timeout waiting for the redis + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_replica() { + if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}" + fi + } + + {{- if .Values.sentinel.staticID }} + # remove generated known sentinels and replicas + tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf + + for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do + NAME="{{ template "redis.fullname" . 
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..923272c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..7db7371 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..0bbbfb6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..928f9a8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} +# {{- if .Values.metrics.serviceMonitor.namespace }} +# namespace: {{ .Values.metrics.serviceMonitor.namespace }} +# {{- else }} + namespace: imxc +# {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - imxc +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..4dae3bc --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..ae27ebb --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..e2ad471 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + 
name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..fba6450 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: imxc + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..f3c9390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..78aa2e6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,378 @@ +{{- if or (not .Values.cluster.enabled) 
(not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..56ba5f1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..5d697de --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,494 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ 
.Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" .
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml new file mode 100644 index 0000000..adb5416 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-master +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: 
redis-data-redis-master-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-0 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-1 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-1 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..0d14129 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..83c87f5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..9452003 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..be0894b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,384 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.imxc.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml 
.Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" .
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..c1f3ae5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..3b3458e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..c1103d2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + 
"matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": 
"metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml new file mode 100644 index 0000000..fcd8710 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml @@ -0,0 +1,932 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: latest + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +fullnameOverride: redis + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + #enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + #registry: docker.io + registry: 10.10.31.243:5000 + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.10-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + #enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "dkagh1234!" 
+## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + # type: NodePort + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + 
externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31379 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + #type: NodePort + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31380 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false +# enabled: true + + image: + registry: 10.10.31.243:5000 # registry.cloud.intermax:5000 + repository: redis/redis-exporter + #tag: 1.15.1-debian-10-r2 + tag: latest + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..3b23a9e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=1 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + 
server.2=zookeeper-1.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..422433a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: imxc +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..9fdcf95 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: imxc +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml new file mode 100644 index 0000000..2a909f7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-1 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-2 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-3 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..a9e5cb8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: imxc +spec: + selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 200m + memory: 500Mi + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election +# readinessProbe: +# exec: +# command: +# - /bin/sh +# - -c +# - '[ "imok" = "$(echo ruok | nc -w 1 -q 1 127.0.0.1 2181)" ]' + volumeMounts: + - name: config + mountPath: 
/etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: zookeeper-storage + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml new file mode 100644 index 0000000..e08ed54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml @@ -0,0 +1,50 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-2 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml new file mode 100644 index 0000000..7b06985 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for 
zookeeper. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/index.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml new file mode 100644 index 0000000..28f0e32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: imxc-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: default + namespace: imxc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/values.yaml new file mode 100644 index 0000000..b7c22ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/02-base/base/values.yaml @@ -0,0 +1,73 @@ +global: + # cluster variables + CLUSTER_ID: cloudmoa + + # default storageClass + DEFAULT_STORAGE_CLASS: exem-local-storage + + # nodeAffinity + affinity_key: cmoa + affinity_value1: worker1 + affinity_value2: worker2 + affinity_value3: worker2 + + # postgres variables + IMXC_POSTGRES_PV_PATH: /media/data/postgres/postgres-data-0 + + #elastic variables + ELASTICSEARCH_PATH1: 
/media/data/elasticsearch/elasticsearch-data-0 + ELASTICSEARCH_PATH2: /media/data/elasticsearch/elasticsearch-data-1 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + # zookeeper variables + IMXC_ZOOKEEPER_PATH1: /media/data/zookeeper/zookeeper-data-0 + IMXC_ZOOKEEPER_PATH2: /media/data/zookeeper/zookeeper-data-1 + IMXC_ZOOKEEPER_PATH3: /media/data/zookeeper/zookeeper-data-2 + + # kafka variables + IMXC_KAFKA_PV_PATH1: /media/data/kafka/kafka-data-0 + IMXC_KAFKA_PV_PATH2: /media/data/kafka/kafka-data-1 + IMXC_KAFKA_PV_PATH3: /media/data/kafka/kafka-data-2 + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # cortex variables + IMXC_INGESTER_PV_PATH1: /media/cloudmoa/ingester/ingester-data-1 + IMXC_INGESTER_PV_PATH2: /media/cloudmoa/ingester/ingester-data-2 + IMXC_INGESTER_PV_PATH3: /media/cloudmoa/ingester/ingester-data-3 + + # redis variables + IMXC_REDIS_PV_PATH1: /media/data/redis/redis-data-0 + IMXC_REDIS_PV_PATH2: /media/data/redis/redis-data-1 + IMXC_REDIS_PV_PATH3: /media/data/redis/redis-data-2 + + # rabbitmq variables + RABBITMQ_PATH: /media/data/rabbitmq + + # custom or etc variables + # IMXC_WORKER_NODE_NAME: $IMXC_WORKER_NODE_NAME # deprecated 2021.10.21 + # IMXC_MASTER_IP: 10.10.30.202 + IMXC_API_SERVER_DNS: imxc-api-service + + METRIC_ANALYZER_MASTER_VERSION: rel3.4.8 + METRIC_ANALYZER_WORKER_VERSION: rel3.4.8 + ELASTICSEARCH_VERSION: v1.0.0 + KAFKA_MANAGER_VERSION: v1.0.0 + KAFKA_INITUTILS_VERSION: v1.0.0 + #KAFKA_VERSION: v1.0.0 + KAFKA_VERSION: v1.0.1 + METRICS_SERVER_VERSION: v1.0.0 + POSTGRES_VERSION: v1.0.0 + CASSANDRA_VERSION: v1.0.0 + RABBITMQ_VERSION: v1.0.0 + CORTEX_VERSION: v1.11.0 #v1.9.0 + #CONSUL_VERSION: 0.7.1 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + rabbitmq: + image: + registry: 10.10.31.243:5000/cmoa3 # {{ .Values.global.IMXC_REGISTRY }} + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh new file mode 100755 index 0000000..b3a27ed --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh @@ -0,0 +1,3085 @@ +#!/bin/sh + +#!/bin/bash + +namespace=$1 +export ES_NODEPORT=`kubectl -n ${namespace} get svc elasticsearch -o jsonpath='{.spec.ports[*].nodePort}'` + +export MASTER_IP=`kubectl get node -o wide | grep control-plane | awk '{print $6}'` + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SECURE=true + +if [ $SECURE = true ] +then +PARAM="-u elastic:elastic --insecure" +PROTO="https" +else +PARAM="" +PROTO="http" +fi + +echo Secure=$SECURE +echo Param=$PARAM +echo Proto=$PROTO + +curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices + +echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices" + +# kubernetes_cluster_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_info" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "date": { + "type": "long" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + } +}' + +# kubernetes_cluster_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cluster_history" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_cluster_history": {} + } +}' + +# kubernetes_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_info" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "id": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_info": {} + } +}' + + + +# kubernetes_event_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_event_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_event_info" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_event_info": {} + } +}' + + + + +# kubernetes_job_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_job_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_job_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "commandlist": { + "type": "text", + "index": false + }, + "labellist": { + "type": "text", + "index": false + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_job_info": {} + } +}' + + + +# kubernetes_cronjob_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cronjob_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cronjob_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "jobname": { + "type": "keyword" + }, + "kind": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "lastruntime": { + "type": "long" + }, + "arguments": { + "type": "text", + "index": false + }, + "schedule": { + "type": "keyword" + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_cronjob_info": {} + } +}' + 
+ + + +# kubernetes_network_connectivity +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_network_connectivity-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_network_connectivity" + } + } + }, + "mappings": { + "properties": { + "timestamp": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "container": { + "type": "keyword" + }, + "pid": { + "type": "integer" + }, + "peerNode": { + "type": "keyword" + }, + "peerNamespace": { + "type": "keyword" + }, + "peerService": { + "type": "keyword" + }, + "peerPod": { + "type": "keyword" + }, + "peerContainer": { + "type": "keyword" + }, + "peerPid": { + "type": "integer" + } + } + }, + "aliases": { + "kubernetes_network_connectivity": {} + } +}' + + + +# sparse_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sparse_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": 
'""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "sparse_log" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "date": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "logpath": { + "type": "text", + "index": false + }, + "contents": { + "type": "text" + }, + "lineNumber": { + "type": "integer" + }, + "probability": { + "type": "float" + }, + "subentityId": { + "type": "keyword" + } + } + }, + "aliases": { + "sparse_log": {} + } +}' + + + +# sparse_model +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_model' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_model" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "modifiedDate": { + "type": "long" + }, + "logPath": { + "type": "keyword" + }, + "savedModel": { + "type": "text", + "index": false + } + } + } +}' + + + +# kubernetes_pod_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ +"order": 0, + "index_patterns": [ + "kubernetes_pod_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": 
"1s", + "lifecycle": { + "name": "kubernetes_pod_info" + } + } + }, + "mappings": { + "properties": { + "eventType": {"type": "keyword"}, + "cluster": {"type": "keyword"}, + "namespace": {"type": "keyword"}, + "node": {"type": "keyword"}, + "pod": {"type": "keyword"}, + "podUID": {"type": "keyword"}, + "podCreationTimestamp": {"type": "long"}, + "podDeletionTimestamp": {"type": "long"}, + "podDeletionGracePeriod": {"type": "long"}, + "resourceVersion": {"type": "keyword"}, + "ownerKind": {"type": "keyword"}, + "ownerName": {"type": "keyword"}, + "ownerUID": {"type": "keyword"}, + "podPhase": {"type": "keyword"}, + "podIP": {"type": "keyword"}, + "podStartTime": {"type": "long"}, + "podReady": {"type": "boolean"}, + "podContainersReady": {"type": "boolean"}, + "isInitContainer": {"type": "boolean"}, + "containerName": {"type": "keyword"}, + "containerID": {"type": "keyword"}, + "containerImage": {"type": "keyword"}, + "containerImageShort": {"type": "keyword"}, + "containerReady": {"type": "boolean"}, + "containerRestartCount": {"type": "integer"}, + "containerState": {"type": "keyword"}, + "containerStartTime": {"type": "long"}, + "containerMessage": {"type": "keyword"}, + "containerReason": {"type": "keyword"}, + "containerFinishTime": {"type": "long"}, + "containerExitCode": {"type": "integer"}, + "containerLastState": {"type": "keyword"}, + "containerLastStartTime": {"type": "long"}, + "containerLastMessage": {"type": "keyword"}, + "containerLastReason": {"type": "keyword"}, + "containerLastFinishTime": {"type": "long"}, + "containerLastExitCode": {"type": "integer"} + } + }, + "aliases": { + "kubernetes_pod_info": {} + } +}' + + + +# kubernetes_pod_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_history" + } + } + }, + "mappings": { + "properties": { + "deployName": { + "type": "keyword" + }, + "deployType": { + "type": "keyword" + }, + "deployDate": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "podPhase": { + "type": "keyword" + }, + "startTime": { + "type": "keyword" + }, + "endTime": { + "type": "keyword" + }, + "exitCode": { + "type": "integer" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "time": { + "type": "long" + }, + "containerId": { + "type": "keyword" + }, + "containerName": { + "type": "keyword" + }, + "containerPhase": { + "type": "keyword" + }, + "eventAction": { + "type": "keyword" + }, + "containerStartTime": { + "type": "keyword" + }, + "containerEndTime": { + "type": "keyword" + }, + "containerImage": { + "type": "keyword" + }, + "containerImageShort": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_pod_history": {} + } +}' + + + + +# metric_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/metric_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/metric_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "metric_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + 
"refresh_interval": "1s", + "lifecycle": { + "name": "metric_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "anomaly": { + "type": "boolean" + }, + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "instance": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "metricId": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "score": { + "type": "integer" + }, + "subKey": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "yhatLowerUpper": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "aliases": { + "metric_score": {} + } +}' + + + + +# entity_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/entity_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/entity_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "entity_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "entity_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "nodeId": { + "type": "keyword" + }, + "maxId": { + "type": "keyword" + }, + "maxScore": { + "type": 
"integer" + }, + "entityScore": { + "type": "integer" + } + } + }, + "aliases": { + "entity_score": {} + } +}' + + +# timeline_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/timeline_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/timeline_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "timeline_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "timeline_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "criticalCount": { + "type": "integer" + }, + "warningCount": { + "type": "integer" + }, + "attentionCount": { + "type": "integer" + }, + "normalCount": { + "type": "integer" + }, + "unixtime": { + "type": "long" + } + } + }, + "aliases": { + "timeline_score": {} + } +}' + + + +# spaninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/spaninfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/spaninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "spaninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "spaninfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": 
"keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "spanId": { + "type": "keyword" + }, + "parentSpanId": { + "type": "keyword" + }, + "protocolType": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "operation": { + "type": "keyword" + }, + "spanKind": { + "type": "keyword" + }, + "component": { + "type": "keyword" + }, + "error": { + "type": "boolean" + }, + "peerAddress": { + "type": "keyword" + }, + "peerHostname": { + "type": "keyword" + }, + "peerIpv4": { + "type": "keyword" + }, + "peerIpv6": { + "type": "keyword" + }, + "peerPort": { + "type": "integer" + }, + "peerService": { + "type": "keyword" + }, + "samplingPriority": { + "type": "keyword" + }, + "httpStatusCode": { + "type": "integer" + }, + "httpUrl": { + "type": "keyword" + }, + "httpMethod": { + "type": "keyword" + }, + "httpApi": { + "type": "keyword" + }, + "dbInstance": { + "type": "keyword" + }, + "dbStatement": { + "type": "keyword" + }, + "dbType": { + "type": "keyword" + }, + "dbUser": { + "type": "keyword" + }, + "messagebusDestination": { + "type": "keyword" + }, + "logs": { + "dynamic": false, + "type": "nested", + "properties": { + "fields": { + "dynamic": false, + "type": "nested", + "properties": { + "value": { + "ignore_above": 256, + "type": "keyword" + }, + "key": { + "type": "keyword" + } + } + }, + "timestamp": { + "type": "long" + } + } + } + } + }, + "aliases": { + "spaninfo": {} + } +}' + + + +# sta_podinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_podinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_podinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "version": { + "type": "keyword" + }, + "components": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "aliases": { + "sta_podinfo": {} + } +}' + + +# sta_httpapi +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpapi-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpapi" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "api": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_httpapi": {} + } +}' + + + +# sta_httpsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpsummary" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "pod": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "api": { + "type": "keyword" + }, + "countTotal": { + "type": "integer" + }, + "errorCountTotal": { + "type": "integer" + }, + "timeTotalMicrosec": { + "type": "integer" + }, + "methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_httpsummary": {} + } +}' + + + +# sta_relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_relation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_relation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "parent": { + "type": "keyword" + }, + "children": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + } 
+ } + }, + "aliases": { + "sta_relation": {} + } +}' + + + +# sta_externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_externalrelation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "externalNamespace": { + "type": "keyword" + }, + "externalService": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_externalrelation": {} + } +}' + + + +# sta_traceinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_traceinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_traceinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "operationName": { + "type": "keyword" + }, + "spanSize": { + "type": "integer" + }, + 
"relatedServices": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "error": { + "type": "boolean" + } + } + }, + "aliases": { + "sta_traceinfo": {} + } +}' + + + +# sta_tracetrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_tracetrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_tracetrend" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": {"type": "integer"} + } + }, + { + "errors": { + "match": "error*", + "mapping": {"type": "integer"} + } + } + ] + }, + "aliases": { + "sta_tracetrend": {} + } +}' + +# script_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/script_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + + + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/script_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "script_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "script_history" + } + } + }, + "mappings": { + "properties": { + "taskId": { + "type": "long" + }, + "scriptName": { + "type": "keyword" + }, + "agentName": { + "type": "keyword" + }, + "targetFile": { + "type": "keyword" + }, + "args": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "validCmd": { + "type": "keyword" + }, + "validVal": { + "type": "keyword" + }, + "valid": { + "type": "boolean" + }, + "validResult": { + "type": "keyword" + }, + "cronExp": { + "type": "keyword" + }, + "createUser": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "error": { + "type": "boolean" + }, + "result": { + "type": "keyword" + }, + "order": { + "type": "keyword" + }, + "mtime": { + "type": "keyword" + } + } + }, + "aliases": { + "script_history": {} + } +}' + + +# kubernetes_audit_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_audit_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_audit_log" + }, + "sort.field": "stageTimestamp", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "verb": { + "type": "keyword" + }, + "userName": { + "type": "keyword" + }, + "sourceIps": { + "type": "keyword" + }, + "resource": { + "type": "keyword" + }, + "code": { + "type": "keyword" + }, + "requestReceivedTimestamp": { + "type": "long" + }, + "stageTimestamp": 
{ + "type": "long" + }, + "durationTimestamp": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_audit_log": {} + } +}' + +# license_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/license_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/license_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "license_history-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "license_history" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "license_history": {} + } +}' + +# alert_event_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/alert_event_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/alert_event_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "alert_event_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "alert_event_history" + } + } + }, + "mappings": { + "properties": { + "alertName": { + "type": "keyword" + }, + "clusterId": { + "type": "keyword" + }, + "data": { + "type": "text", + "index": false + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "level": { + "type": "keyword" + }, + "metaId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "startsAt": { + "type": "long" + }, + "threshold": { + "type": "double" + }, + "value": { + "type": "double" + }, + "message": { + "type": "keyword" + }, + "endsAt": { + "type": "long" + }, + "status": { + "type": "keyword" + }, + "hookCollectAt": { + "type": "long" + } + } + }, + "aliases": { + "alert_event_history": {} + } +}' + +# JSPD ilm +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/jspd_ilm' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +# jspd_lite-activetxn +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-activetxn' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-activetxn-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + 
"type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "cpu_time": { + "type": "integer" + }, + "memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_exec_count": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "active_sql_elapse_time": { + "type": "integer" + }, + "db_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "thread_id": { + "type": "long" + }, + "state": { + "type": "short" + }, + "method_id": { + "type": "integer" + }, + "method_seq": { + "type": "integer" + }, + "stack_crc": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-activetxn": {} + } +}' + +# jspd_lite-alert +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-alert' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-alert-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "status": { + "type": "short" + }, + "value": { + "type": "integer" + }, + "pid": { + "type": "integer" + } 
+ } + }, + "aliases": { + "jspd_lite-alert": {} + } +}' + +# jspd_lite-e2einfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-e2einfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-e2einfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "root_tid": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "e2e_info_type": { + "type": "short" + }, + "e2e_key": { + "type": "keyword" + }, + "elapse_time": { + "type": "integer" + }, + "dest_url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-e2einfo": {} + } +}' + +# jspd_lite-methodname +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-methodname' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-methodname-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "method_id": { + "type": "integer" + }, + "class_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + 
"method_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-methodname": {} + } +}' + +# jspd_lite-sqldbinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-sqldbinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-sqldbinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-sqldbinfo": {} + } +}' + +# jspd_lite-txninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "end_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "keyword" + }, + "client_ip": { + "type": "keyword" + }, + "exception": { + 
"type": "short" + }, + "thread_cpu_time": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "open_conn": { + "type": "integer" + }, + "close_conn": { + "type": "integer" + }, + "open_stmt": { + "type": "integer" + }, + "close_stmt": { + "type": "integer" + }, + "open_rs": { + "type": "integer" + }, + "close_rs": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_execute_count": { + "type": "integer" + }, + "sql_elapse_time": { + "type": "integer" + }, + "sql_elapse_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + }, + "txn_flag": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + }, + "http_status": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "aliases": { + "jspd_lite-txninfo": {} + } +}' + +# jspd_lite-txnmethod +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnmethod' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnmethod-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "method_seq": { + "type": "integer" + }, + "method_id": { + "type": "integer" + }, + "calling_method_id": { + "type": "integer" + }, + "stack_crc32": { + "type": "integer" + }, + "calling_stack_crc32": { + "type": "integer" + }, + "elapse_time": { + "type": 
"integer" + }, + "exec_count": { + "type": "integer" + }, + "error_count": { + "type": "integer" + }, + "cpu_time": { + "type": "integer" + }, + "memory": { + "type": "integer" + }, + "start_time": { + "type": "long" + }, + "method_depth": { + "type": "integer" + }, + "exception": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 32768, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-txnmethod": {} + } +}' + +# jspd_lite-txnsql +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnsql' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnsql-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "cursor_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "method_id": { + "type": "integer" + }, + "execute_count": { + "type": "integer" + }, + "elapsed_time": { + "type": "integer" + }, + "elapsed_time_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "fetch_time_max": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-txnsql": {} + } +}' + +# jspd_lite-wasstat +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-wasstat' -H 'Content-Type: application/json' -d '{ + 
"order": 0, + "index_patterns": [ + "jspd_lite-wasstat-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "active_txns": { + "type": "integer" + }, + "sql_exec_count": { + "type": "long" + }, + "sql_prepare_count": { + "type": "long" + }, + "sql_fetch_count": { + "type": "long" + }, + "txn_end_count": { + "type": "long" + }, + "open_file_count": { + "type": "integer" + }, + "close_file_count": { + "type": "integer" + }, + "open_socket_count": { + "type": "integer" + }, + "close_socket_count": { + "type": "integer" + }, + "txn_elapse": { + "type": "long" + }, + "sql_elapse": { + "type": "long" + }, + "txn_elapse_max": { + "type": "long" + }, + "sql_elapse_max": { + "type": "long" + }, + "txn_error_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-wasstat": {} + } +}' + +# jspd_tta-externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "external_namespace": { + "type": "keyword" + }, + "external_service": { + "type": "keyword" + } + } + }, + "aliases": { + 
"jspd_tta-externalrelation": {} + } +}' + +# jspd_tta-relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "from_service": { + "type": "keyword" + }, + "to_service": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_tta-relation": {} + } +}' + +# jspd_tta-txnlist +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnlist' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnlist-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-txnlist": {} + } +}' + +# jspd_tta-txnsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": 
{ + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + }, + "req_count": { + "type": "integer" + }, + "resp_count": { + "type": "integer" + }, + "total_duration": { + "type": "long" + }, + "failed": { + "type": "integer" + }, + "http_methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "http_statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "jspd_tta-txnsummary": {} + } +}' + +# jspd_tta-txntrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txntrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txntrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": { + "type": "integer" + } + } + }, + { + "errors": { + "match": "error*", + "mapping": { + "type": "integer" + } + } + } + ] + }, + "aliases": { + "jspd_tta-txntrend": {} + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "5d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "maximum_metrics" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "maximum_metrics" + }, + "sort.field": "date", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "kind": { + "type": "keyword" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entity": { + "type": "keyword" + }, + "maximum": { + "type": "float" + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } +}' diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": 
'""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + 
+SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh 
@@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 
'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch 
| grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} 
+ source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 
+ +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", 
+ "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": 
"keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep 
NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + 
curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide 
| grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + 
echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} 
+ } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' 
+DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql new file mode 100644 index 0000000..7ed34ad --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql @@ -0,0 +1,803 @@ +UPDATE public.metric_meta2 SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by 
(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)' WHERE id = 'container_memory_usage_by_workload'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP' WHERE id = 7; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 
+kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' WHERE id = 4; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - 
nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: 
cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + 
hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + 
- target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: 
$COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 6; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: 
[__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + 
kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: 
"INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 3; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql new file mode 100644 index 0000000..6b63e62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql @@ -0,0 +1,919 @@ + +-- from diff + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config +( + id bigint not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD 
CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + + + +alter table tenant_info + add delete_scheduler_date timestamp; + +alter table tenant_info + add tenant_init_clusters varchar(255); + +alter table cloud_user + add dormancy_date timestamp; + +alter table cloud_user + add status varchar(255) default 'use'::character varying not null; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check|Check Script'; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check'; + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +UPDATE public.menu_meta +SET "position" = 10::integer +WHERE id = 80::bigint; + +UPDATE public.menu_meta +SET "position" = 99::integer +WHERE id = 90::bigint; + + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: 
''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name 
}}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"', true); + + + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", 
"attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", 
"ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 
'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 
'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); + + +---public.metric_meta2 +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}} + node_memory_SReclaimable_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024'::text WHERE id LIKE 'node#_memory#_used' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100'::text WHERE id LIKE 'host#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = 'sum by(instance, 
mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))'::text WHERE id LIKE 'host#_fs#_total#_by#_mountpoint' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100'::text WHERE id LIKE 'cluster#_memory#_usage' ESCAPE '#'; + + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - (node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}} + node_memory_SReclaimable_bytes{xm_entity_type=''Node'', {filter}})) >= 0 or (node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}})) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} * 100'::text WHERE id LIKE 'node#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})'::text WHERE id LIKE 'host#_memory#_used' ESCAPE '#'; + + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, 
created_date, modified_date) VALUES +('imxc_jspd_pod_txn_error_rate', 'Service Pod Transaction Error Rate', 'The number of transaction error rate for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.', '2022-02-15 18:08:58.18', '2022-02-15 18:08:58.18'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_rate', 'Service Transaction Error Rate', 'Service Transaction Error Rate', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.', '2022-02-15 14:33:00.118', '2022-02-15 15:40:17.64'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_elapsed_time_avg', 'Service Transaction Elapsed Time (avg)', 'Service Average Elapsed Time', 'sum by(xm_clst_id, xm_namespace, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2021-11-15 16:09:34.233', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_elapsed_time_avg', 'Service Pod Transaction Elapsed Time (avg)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Pod Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2022-02-15 18:04:55.228', '2022-02-15 18:04:55.228'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_count', 'Service Transaction Error Count', 'Service Transaction Error Count', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) ', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Error Request count:{{humanize $value}}%|{threshold}%.', '2021-11-15 16:10:31.352', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, 
meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_per_sec', 'Service Transaction Count (per Second)', 'Service Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Svc Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2021-11-15 16:11:19.606', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_per_sec', 'Service Pod Transaction Count (per sec)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-02-15 17:59:39.45', '2022-02-15 17:59:39.45'); + + + +-- Auto-generated SQL script #202202221030 +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_system_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_system_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_user_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE 
id='container_cpu_user_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_limit_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_reads_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)' + WHERE id='container_fs_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_writes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_cache_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_max_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
(container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_swap_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100)' + WHERE id='container_memory_usage_by_workload'; +UPDATE 
public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_working_set_bytes_by_workload'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_active_txn_per_sec', 'Service Active Transaction Count (per Second)', 'Service Active Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:51:45.946', '2022-03-11 15:51:45.946') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))' +WHERE id = 'imxc_jspd_active_txn_per_sec'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_pod_active_txn_per_sec', 'Service Pod Active Transaction Count (per sec)', 'The number of active transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:53:29.252', '2022-03-11 15:53:29.252') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name, 
xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))' +WHERE id = 'imxc_jspd_pod_active_txn_per_sec'; + + +--public.agent_install_file_info + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - 
nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: 
$CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - 
source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: 
$DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql new file mode 100644 index 0000000..e84e9be --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql @@ -0,0 +1,459 @@ + UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - 
watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 
65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent + spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: 
cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] 
+ regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: 
xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent + spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config + ' WHERE id = 6; \ No 
newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql new file mode 100644 index 0000000..0d20f2c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql @@ -0,0 +1,1379 @@ +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +-- 더존(3.3.2) 에서 누락되었던 항목 모두 추가 +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) 
VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values 
('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, 
code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'imxc-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into public.common_setting 
(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api-demo', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui-demo', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream Txntrend', 
'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +UPDATE public.agent_install_file_info +SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - 
cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + 
matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text +WHERE id = 2::bigint; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='topology_idx'; + +UPDATE public.common_setting +SET code_value='spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', + code_group='storageidx' +WHERE code_id='trace_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='event_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE 
code_id='sparse_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='anomaly_idx'; + +UPDATE public.common_setting +SET code_value='alert_event_history', + code_group='storageidx' +WHERE code_id='alert_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='audit_idx'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - 
hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + 
k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1'::text WHERE id = 5::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: 
kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + 
- batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: 
cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions 
is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: 
xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + 
target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +ALTER TABLE public.alert_rule_config_info ALTER COLUMN config_data TYPE text; + +update alert_rule_config_info +set config_data = '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ 
$labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"' +where config_id = 'rules'; + +ALTER TABLE public.alert_config_info ALTER COLUMN config_data TYPE text, ALTER COLUMN config_default TYPE text; + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql new file mode 100644 index 0000000..5c5d3c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql @@ -0,0 +1,8 @@ +-- admin의 owner 속성 추가 +UPDATE cloud_user SET is_tenant_owner = true WHERE user_id = 'admin'; + +-- owner에 대한 종속성을 admin으로 이관기능(필요하면 사용) +UPDATE auth_resource3 SET name = replace(name, 'owner', 'admin') WHERE name like '%|owner|%'; + +-- CLOUD-2305 node_memory_used metric_meta node_memory_SReclaimable_bytes 제거 패치문 반영 +UPDATE metric_meta2 SET expr = 
'((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024' WHERE id = 'node_memory_used'; diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql new file mode 100644 index 0000000..02f01db --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql @@ -0,0 +1,361 @@ +-- agent_install_file_info +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: 
[__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when 
total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: 
[__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - 
source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +-- CLOUD-2798 pod_phase_count_by_cluster metric_meta 수정 +UPDATE metric_meta2 SET expr = 'count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))' WHERE id = 'pod_phase_count_by_cluster'; + +-- node_memory_usage 
수정 +update metric_meta2 set expr = 'sum by (xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' where id = 'node_memory_usage'; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql new file mode 100644 index 0000000..7c582c5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql @@ -0,0 +1,360 @@ +-- CLOUD-3473 Memory capacity 조회 쿼리 수정 +update metric_meta2 set description = 'imxc_kubernetes_node_resource_capacity_memory', +expr = 'sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})' where id = 'cluster_memory_capacity'; + +-- module명 metricdata owner_name 와 일치하도록 변경 +update common_setting set code_value ='cmoa-collector' where code_id = 'Cloudmoa Collector'; +update common_setting set code_value ='imxc-api' where code_id = 'Api Server'; +update common_setting set code_value ='imxc-ui' where code_id = 'Ui Server'; +update common_setting set code_value ='cloudmoa-trace-agent' where code_id = 'Trace Agent'; + +-- CLOUD-4795 Contaeird 환경 Container Network 수집 불가 건 확인 +-- 22.10.08 현대카드 대응 건으로 release 3.4.6에 반영 +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + 
summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: 
xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] 
+ target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: 
job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - 
source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: 
xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config'::text WHERE id = 3::bigint; + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql 
b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql new file mode 100644 index 0000000..92344db --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql @@ -0,0 +1,102 @@ +-- CLOUD-4752 node_memory_usage alert 관련 쿼리 수정 +update metric_meta2 set +expr = 'sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' +where id = 'node_memory_usage'; + +-- CLOUD-6474 node-exporter | GPMAXPROCS 세팅 +-- Auto-generated SQL script #202211241543 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - 
--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' + WHERE id=4; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql new file mode 100644 index 0000000..ea66c68 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql @@ -0,0 +1,387 @@ +-- CLOUD-6526 host 관련 쿼리 수정 +-- 수집된 메트릭 시간차로 인해 데이터 표출이 안되는걸 방지하기 위해 rate 5m 추가 +UPDATE metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )' +WHERE id='host_network_io_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or 
rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )' +WHERE id = 'host_disk_read_write_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (instance) ( +(rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or +(rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))' +WHERE id = 'host_disk_iops'; + +-- CLOUD-8671 Metric-Agent | 데이터 필터링 설정 추가 +-- Workload > Pod 화면 등에 Docker 런타임 환경의 자원 사용량이 2배 가량으로 보이던 문제 픽스 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: 
''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=3; + +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert 
: ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: 
''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: 
[namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: 
cloudmoa-metric-agent-config +' + WHERE id=6; diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql new file mode 100644 index 0000000..99d1dbe --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql @@ -0,0 +1,2844 @@ +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS warning_sign character VARYING(255); +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS critical_sign character VARYING(255); + +CREATE TABLE IF NOT EXISTS public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +) + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + 
label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + WHERE public.metric_meta2.id = 'node_contextswitch_and_filedescriptor'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU 
Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_core_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize 
$value}}%|{threshold}%', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_reads_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_limit_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} 
Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_writes_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.o + wner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on 
(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.o + wner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) 
group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_max_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} 
CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_bytes_by_workload'; + + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_swap_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in 
GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_working_set_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_cache_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE 
public.metric_meta2.id = 'container_network_receive_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_transmit_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_pod_not_running_by_workload','Number of Pods not running 
By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_pod_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count 
by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_container_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + WHERE public.metric_meta2.id = 'cotainer_restart_count_by_workload'; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: 
scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE 
+ spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) + WHERE public.agent_install_file_info.id = 4; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + 
metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. 
This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + 
replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') + WHERE public.agent_install_file_info.id = 3; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + 
verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) + WHERE public.agent_install_file_info.id = 2; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... 
should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') + WHERE public.agent_install_file_info.id = 6; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. 
+ ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + 
apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + 
app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. + ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + 
containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) + WHERE public.agent_install_file_info.id = 7; + +--Menu Resource +--Infrastructure +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (SELECT id 
FROM auth_resource3 WHERE name='menu|Infrastructure|Topology'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Resource Usage'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Resource Usage'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Namespace'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Namespace'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Nodes'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES 
(6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Node Details'); + +--Workloads +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Deploy List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Cron Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Pods'); + +--Services +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Structure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Detail'); + +--Statistics & Analysis +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Performance Trends'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Anomaly Score'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Job History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse 
Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Log Viewer'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Event Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Container Life Cycle'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Service Traces'); + +--Reports +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Documents'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (62, 'Templates', NULL, 1, 'reportSettings', (select id from auth_resource3 where name='menu|Reports|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Templates'); + +--Dashboards +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Documents'); + +INSERT INTO public.menu_meta (id, 
description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Templates'); + +--Hosts +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (80, 'Hosts', '12.Hosts', 1, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
+VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Detail'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Group'); + +--Settings +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (90, 'Settings', '08.Setting', 10, NULL, (select id from auth_resource3 where name='menu|Settings'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|User & Group'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', 
(select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Host Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Metric Meta'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|General'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Notification'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (99, 
'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alias'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|License'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent Installation'); + +--Health Check +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) +VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check|Check Script'); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql new file mode 100644 index 0000000..60ad862 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql @@ -0,0 +1,4 @@ +alter table cloud_user alter column log_in_count set default 0; +alter table cloud_user alter column user_lock set default false; + +UPDATE public.metric_meta2 SET meta_name = 'Number of Containers Restart', description = 'Number of Containers Restart (10m)', expr = 'increase(imxc_kubernetes_container_restart_count{{filter}}[10m])', resource_type = 'State', entity_type = 'Workload', groupby_keys = null, in_use = true, anomaly_score = false, message = 'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.', created_date = '2021-06-23 09:30:38.646312', modified_date = '2021-06-23 09:30:38.646312' WHERE id = 'cotainer_restart_count_by_workload'; \ No newline at end of file diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql new file mode 100644 index 0000000..c8deff4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql @@ -0,0 +1,1667 @@ +CREATE TABLE public.tenant_info ( + id character varying(255) NOT NULL, + name character varying(255) NOT NULL, + in_used boolean DEFAULT true, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + delete_scheduler_date timestamp without time zone NULL, + contract_id bigint NOT NULL, + tenant_init_clusters character varying(255) NULL +); +ALTER TABLE ONLY public.tenant_info ADD CONSTRAINT tenant_info_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + cluster_id character varying(255) NOT NULL, + description character varying(255), + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + namespace character varying(255) DEFAULT 'default'::character varying +); + +ALTER TABLE public.alert_group OWNER TO admin; + +ALTER TABLE ONLY public.alert_group + ADD CONSTRAINT alert_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX alert_group_name_uindex ON public.alert_group USING btree (name); + +CREATE TABLE public.alert_target ( + id bigint NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + cluster_id character varying(255) NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + alert_group_id bigint, + namespace character varying(255) +); + +ALTER TABLE public.alert_target OWNER TO admin; + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT alert_target_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.alert_target + ADD CONSTRAINT fkjrvj775641ky7s0f82kx3sile FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + + + +CREATE TABLE public.report_template ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + enable boolean NOT NULL, + metric_data text, + template_data text, + title character varying(255) +); + +ALTER TABLE public.report_template OWNER TO admin; + +ALTER TABLE ONLY public.report_template + ADD CONSTRAINT report_template_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_event ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + alert_name character varying(255) NOT NULL, + cluster_id character varying(255) NOT NULL, + data text NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + level character varying(255) NOT NULL, + meta_id character varying(255) NOT NULL, + namespace character varying(255), + starts_at bigint NOT NULL, + threshold character varying(255) NOT NULL, + value character varying(255) NOT NULL, + message character varying(255), + ends_at bigint, + status character varying(20) NOT NULL, + hook_collect_at bigint +); + +ALTER TABLE public.alert_event OWNER TO admin; + +CREATE TABLE public.metric_meta2 ( + id character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + description character varying(255) NOT NULL, + expr text NOT NULL, + resource_type character varying(255), + entity_type character varying(255) NOT NULL, + groupby_keys character varying(255), + in_use boolean DEFAULT false NOT NULL, + anomaly_score boolean DEFAULT false NOT NULL, + message character varying(255) NOT NULL, + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without 
time zone DEFAULT now() NOT NULL +); + +ALTER TABLE public.metric_meta2 OWNER to admin; + +ALTER TABLE ONLY public.metric_meta2 + ADD CONSTRAINT metric_meta2_pk PRIMARY KEY (id); + +CREATE TABLE public.alert_rule ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + critical float, + name character varying(255), + warning float, + alert_group_id bigint, + alert_rule_meta_id character varying(255) NOT NULL, + alert_target_id bigint, + duration character varying(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + warning_sign character varying(255), + critical_sign character varying(255) +); + +ALTER TABLE public.alert_rule OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT alert_rule_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk6b09d1xfyago6wiiqhdiv03s3 FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk8wkucwkgr48hkfg8cvuptww0f FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fkiqaskea7ts0f872u3nx9ne25u FOREIGN KEY (alert_target_id) REFERENCES public.alert_target(id); + +CREATE TABLE public.alert_rule_meta ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + description text NOT NULL, + expr character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + target character varying(255) NOT NULL, + message character varying(255) +); + +ALTER TABLE public.alert_rule_meta OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule_meta + ADD CONSTRAINT alert_rule_meta_pkey PRIMARY KEY (id); + +CREATE SEQUENCE hibernate_sequence; + +CREATE TABLE public.cloud_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + name 
character varying(255) NOT NULL, + description character varying(255), + created_by character varying(255), + auth_resource_id bigint +); + +ALTER TABLE public.cloud_group OWNER TO admin; + +ALTER TABLE ONLY public.cloud_group + ADD CONSTRAINT cloud_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX cloud_group_name_uindex ON public.cloud_group USING btree (name); + +CREATE TABLE public.cloud_user ( + user_id character varying(255) NOT NULL, + email character varying(255), + is_admin boolean NOT NULL, + phone character varying(255), + user_nm character varying(255) NOT NULL, + user_pw character varying(255) NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + dormancy_date timestamp without time zone NULL, + company character varying(255), + department character varying(255), + last_log_in_date timestamp without time zone, + "position" character varying(255), + use_ldap boolean NOT NULL, + auth_method character varying(255) NOT NULL, + log_in_count integer default 0 NOT NULL, + user_lock boolean default false NOT NULL, + user_lock_date timestamp without time zone, + tenant_id character varying(120), + is_tenant_owner boolean default false, + auth_resource_id bigint, + status character varying(255) default 'use' NOT NULL +); + +ALTER TABLE public.cloud_user OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user ADD CONSTRAINT cloud_user_pkey PRIMARY KEY (user_id); + +ALTER TABLE ONLY public.cloud_user + ADD CONSTRAINT cloud_user_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.menu_meta ( + id bigint NOT NULL, + description character varying(255), + icon character varying(255), + "position" integer NOT NULL, + url character varying(255), + auth_resource3_id bigint NOT NULL, + scope_level int default 0 +); + +ALTER TABLE public.menu_meta OWNER TO admin; + +ALTER TABLE ONLY public.menu_meta + ADD CONSTRAINT menu_meta_pkey PRIMARY KEY (id); + + + +CREATE 
TABLE public.metric_base ( + meta_name character varying(255) NOT NULL, + provider character varying(255) NOT NULL, + description character varying(255) NOT NULL, + resource_type character varying(255), + diag_type character varying(255), + entity_type character varying(255) NOT NULL, + metric_type character varying(255) NOT NULL, + keys character varying(255), + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.metric_base OWNER TO admin; + +ALTER TABLE ONLY public.metric_base + ADD CONSTRAINT metric_base_pk PRIMARY KEY (meta_name); + +CREATE TABLE public.report_static ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + metric_data text, + template_data text, + title character varying(255), + type character varying(255), + report_template_id bigint +); + +ALTER TABLE public.report_static OWNER TO admin; + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT report_static_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT fk7o821ym9a57lrcfipf928cfpe FOREIGN KEY (report_template_id) REFERENCES public.report_template(id); + +CREATE TABLE public.user_group ( + user_group_id bigint NOT NULL, + user_id character varying(255) NOT NULL +); + +ALTER TABLE public.user_group OWNER TO admin; + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT user_group_pkey PRIMARY KEY (user_group_id, user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkooy6rip2craw6jy3geb5wnix6 FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkowo8h9te5nwashab3u30docg FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +CREATE TABLE public.cloud_user_profile ( + user_id character 
varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + profile_image oid +); + +ALTER TABLE public.cloud_user_profile OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_profile + ADD CONSTRAINT cloud_user_profile_pkey PRIMARY KEY (user_id); + + +CREATE TABLE public.common_setting ( + code_id character varying(255) NOT NULL, + code_value character varying(255), + code_desc character varying(255), + code_auth character varying(255), + code_group character varying(255), + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.common_setting OWNER TO admin; + +ALTER TABLE ONLY public.common_setting + ADD CONSTRAINT common_setting_pkey PRIMARY KEY (code_id); + + + +CREATE TABLE public.dashboard_thumbnail ( + id bigint NOT NULL, + thumbnail_image oid, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.dashboard_thumbnail OWNER TO admin; + +ALTER TABLE ONLY public.dashboard_thumbnail + ADD CONSTRAINT dashboard_thumbnail_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.notification_channel ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone, + modified_by character varying(255), + modified_date timestamp without time zone, + cluster_id character varying(255), + config text, + name character varying(255), + type character varying(255) +); + +ALTER TABLE public.notification_channel OWNER TO admin; + +ALTER TABLE ONLY public.notification_channel + ADD CONSTRAINT notification_channel_pkey PRIMARY KEY (id); + + +CREATE TABLE public.notification_registry ( + id bigint NOT NULL, + alert_rule_id bigint NOT NULL, + notification_channel_id bigint +); + +ALTER TABLE public.notification_registry OWNER TO admin; + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT notification_registry_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.notification_registry + ADD CONSTRAINT fk28xo8snm6fd19i3uap0oba0d1 FOREIGN KEY (notification_channel_id) REFERENCES public.notification_channel(id); + + +CREATE TABLE public.license_check_2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_id integer NOT NULL, + real_host_id integer NOT NULL, + imxc_cpu_count integer NOT NULL, + real_cpu_count integer NOT NULL, + target_clusters_count integer NOT NULL, + real_clusters_count integer NOT NULL, + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + features_bitmap integer NOT NULL, + allowable_range integer NOT NULL, + check_time timestamp without time zone NOT NULL, + check_result integer NOT NULL +); + +ALTER TABLE public.license_check_2 + ADD CONSTRAINT license_check_pkey PRIMARY KEY (id); + +CREATE INDEX license_check_check_time_idx ON license_check_2(check_time); + + +CREATE TABLE public.license_violation ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone +); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check_2(id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check_2(id); + +CREATE INDEX license_violation_check_time_idx ON license_violation(check_time); +CREATE INDEX 
license_violation_resolved_time_idx ON license_violation(resolved_time); + + +CREATE TABLE public.license_key ( + id bigint NOT NULL, + license_key text NOT NULL, + set_time timestamp NOT NULL, + in_used bool NULL, + tenant_id varchar NULL, + cluster_id bigint NULL, + CONSTRAINT license_key_pkey PRIMARY KEY (id) +); + +ALTER TABLE public.license_key ADD CONSTRAINT license_key_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.license_check2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_ids character varying(255), + real_host_ids character varying(255), + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + allowable_range integer NOT NULL, + license_cluster_id character varying(255), + check_time timestamp without time zone NOT NULL, + check_result integer NOT null +); + +ALTER TABLE public.license_check2 + ADD CONSTRAINT license_check2_pkey PRIMARY KEY (id); + +CREATE INDEX license_check2_time_idx ON license_check2(check_time); + +CREATE TABLE public.license_violation2 ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone, + cluster_id varchar not null +); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check2(id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT 
license_violation2_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check2(id); + +CREATE INDEX license_violation2_check_time_idx ON license_violation2(check_time); +CREATE INDEX license_violation2_resolved_time_idx ON license_violation2(resolved_time); + +CREATE TABLE public.license_key2 ( + id bigint not null, + license_key text not null, + set_time timestamp without time zone not null, + cluster_id varchar, + license_used bool not null +); + +ALTER TABLE public.license_key2 + ADD CONSTRAINT license_key2_pkey PRIMARY KEY (id); + +create table public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +CREATE TABLE public.auth_resource2 ( + id bigint NOT NULL default nextval('hibernate_sequence'), + access_type integer NOT NULL, + name character varying(255) NOT NULL, + parent_id bigint, + type character varying(255) NOT NULL +); + +ALTER TABLE public.auth_resource2 OWNER TO admin; + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT auth_resource2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT resource_name_uniq UNIQUE (name, type, parent_id); + +--ALTER TABLE ONLY public.auth_resource2 +-- ADD 
CONSTRAINT auth_resource2_auth_resource_id_fk FOREIGN KEY (parent_id) REFERENCES public.auth_resource2(id); +-- +--ALTER TABLE ONLY public.menu_meta +-- ADD CONSTRAINT fk2tqq4ybf6w130fsaejhrsnw5s FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.user_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_id character varying(255) +); + +ALTER TABLE public.user_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.user_permission2 +-- ADD CONSTRAINT user_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_user_id_fk FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + + +CREATE TABLE public.group_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_group_id bigint +); + +ALTER TABLE public.group_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_user_group_id_fk FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +-- ALTER TABLE ONLY public.group_permission2 +-- ADD CONSTRAINT group_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.resource_group2 ( + id int8 NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + "name" varchar(255) NOT NULL, + description 
varchar(255) NULL, + CONSTRAINT resource_group2_pkey PRIMARY KEY (id) +-- CONSTRAINT resource_group2_fk1 FOREIGN KEY (id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_group2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_group2 TO "admin"; + +CREATE TABLE public.resource_member2 ( + resource_group_id int8 NOT NULL, + auth_resource_id int8 NOT NULL, + CONSTRAINT resource_member2_pkey PRIMARY KEY (resource_group_id, auth_resource_id), + CONSTRAINT resource_member2_fkey1 FOREIGN KEY (resource_group_id) REFERENCES resource_group2(id) +-- CONSTRAINT resource_member2_fkey2 FOREIGN KEY (auth_resource_id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_member2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_member2 TO "admin"; + +CREATE TABLE public.dashboard2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + layout text NOT NULL, + title character varying(255) NOT NULL, + auth_resource_id bigint NOT NULL, + created_by character varying(255) NOT NULL, + modified_by character varying(255) NOT NULL, + description character varying(255), + share boolean DEFAULT false +); + +ALTER TABLE public.dashboard2 OWNER TO admin; + +ALTER TABLE ONLY public.dashboard2 + ADD CONSTRAINT dashboard2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.dashboard2 +-- ADD CONSTRAINT dashboard_resource_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.log_management ( + cluster_id varchar NOT NULL, + node_id varchar NOT NULL, + log_rotate_dir varchar, + log_rotate_count integer, + log_rotate_size integer, + log_rotate_management boolean NOT NULL, + back_up_dir varchar, + back_up_period integer, + back_up_dir_size integer, + back_up_management boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +alter table public.log_management add constraint 
log_management_pkey primary key (cluster_id, node_id); + +CREATE TABLE public.sampling_setting ( + service_id bigint NOT NULL, + service_name character varying(255), + sampling_type character varying(255), + sampling_param character varying(255), + cluster varchar, + namespace varchar, + cluster_id bigint +); +ALTER TABLE public.sampling_setting OWNER TO admin; + +ALTER TABLE ONLY public.sampling_setting + ADD CONSTRAINT sampling_setting_pkey PRIMARY KEY (service_id); + +CREATE TABLE public.operation_setting ( + id bigint NOT NULL, + service_id bigint NOT NULL, + sampling_type character varying(255), + sampling_param character varying(255), + operation_name character varying(255) +); + +ALTER TABLE public.operation_setting OWNER TO admin; + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_fkey FOREIGN KEY (service_id) REFERENCES public.sampling_setting(service_id); + +CREATE TABLE public.cluster_setting ( + cluster_id bigint NOT NULL, + param_type character varying(255), + param_value character varying(255), + cluster_name varchar, + name character varying(255) +); + +ALTER TABLE ONLY public.cluster_setting + ADD CONSTRAINT cluster_setting_pkey PRIMARY KEY (cluster_id); + +CREATE TABLE public.alias_code ( + user_id varchar NOT NULL, + id varchar NOT NULL, + name varchar, + type varchar, + use_yn varchar, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.alias_code add constraint alias_code_pkey primary key (user_id, id); + +CREATE TABLE public.sparse_log_info ( + id varchar NOT NULL, + cluster_id varchar, + namespace varchar, + target_type varchar, + target_id varchar, + log_path varchar, + created_date timestamp, + modified_date timestamp, + threshold float4, + PRIMARY KEY ("id") +); + +CREATE TABLE public.view_code ( + user_id varchar NOT NULL, + view_id 
varchar NOT NULL, + json_data text, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.view_code add constraint view_code_pkey primary key (user_id, view_id); + +CREATE TABLE public.entity_black_list ( + entity_type varchar not null, + entity_name varchar not null, + cluster_id varchar not null, + namespace varchar, + black_list bool not null, + workload varchar(255) not null +); + +ALTER TABLE public.entity_black_list + ADD CONSTRAINT entity_black_list_pkey PRIMARY KEY (entity_type, entity_name, cluster_id, namespace); + +CREATE TABLE public.script_setting ( + id bigint NOT NULL, + name character varying(255), + agent_list character varying(255), + file_path character varying(255), + args character varying(255), + valid_cmd character varying(255), + valid_val character varying(255), + cron_exp character varying(255), + create_user character varying(255), + mtime BIGINT, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.script_setting + ADD CONSTRAINT script_setting_pkey PRIMARY KEY (id); + +CREATE TABLE public.agent_install_file_info ( + id bigint NOT NULL, + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + description text, + version character varying(255), + yaml text, + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.agent_install_file_info ADD CONSTRAINT agent_install_file_info_pkey PRIMARY KEY (id); + +create table auth_resource3( + id bigint NOT NULL default nextval('hibernate_sequence'), + name character varying(255) NOT NULL, + is_deleted boolean not null, + tenant_id character varying(255) +); + +ALTER TABLE public.auth_resource3 owner to admin; + +ALTER TABLE ONLY public.auth_resource3 + ADD CONSTRAINT auth_resource3_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.auth_resource3 + ADD CONSTRAINT auth_resource3_name_uniq UNIQUE (name); + +create table resource_member3( + resource_group_id bigint not null, + auth_resource3_id bigint not null +); + +ALTER TABLE resource_member3 owner to admin; + +ALTER TABLE ONLY public.resource_member3 + ADD CONSTRAINT resource_member3_pkey primary key (resource_group_id, auth_resource3_id); + +ALTER TABLE ONLY public.auth_resource3 ADD CONSTRAINT auth_resource3_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +ALTER TABLE public.menu_meta ADD CONSTRAINT menu_meta_auth_resource3_fk FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.user_permission2 ADD CONSTRAINT user_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_group2 ADD CONSTRAINT resource_group2_auth_resource3_fk1 FOREIGN KEY (id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey1 FOREIGN KEY (resource_group_id) REFERENCES public.resource_group2(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey2 FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.group_permission2 ADD CONSTRAINT group_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.dashboard2 ADD CONSTRAINT dashboard2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_user ADD CONSTRAINT cloud_user_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_group ADD CONSTRAINT cloud_group_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character 
varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +-- noti server table +CREATE TABLE public.alert_group_v2 ( + id bigint NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + cluster_id varchar(255) NOT NULL, + description varchar(255), + name varchar(255) NOT NULL, + type varchar(255) NOT NULL, + namespace varchar(255) default 'default'::character varying, + destination varchar(255) NOT NULL, + created_by varchar(255) NOT NULL +); + +CREATE TABLE public.alert_target_v2 ( + id bigint NOT NULL, + created_date timestamp, + modified_date timestamp, + cluster_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + alert_group_id bigint, + namespace varchar(255) +); + +CREATE TABLE public.alert_rule_v2 ( + id bigint NOT NULL, + created_date timestamp 
NOT NULL, + modified_date timestamp NOT NULL, + critical double precision, + name varchar(255), + warning double precision, + alert_group_id bigint, + alert_rule_meta_id varchar(255) NOT NULL, + alert_target_id bigint, + duration varchar(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + critical_sign varchar(255), + warning_sign varchar(255), + destination varchar(255), + created_by varchar(255) +); + +ALTER TABLE public.alert_group_v2 ADD CONSTRAINT alert_group_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_id_pk PRIMARY KEY (id); + +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_rule_meta_id_fk FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_target_id_fk FOREIGN KEY (alert_target_id) REFERENCES public.alert_target_v2(id); +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT fk4lljw4fnija73tm3lthjg90rx FOREIGN KEY (alert_rule_id) REFERENCES public.alert_rule_v2(id); + + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config +( + id varchar not null, + cluster_id varchar, + resolve_timeout varchar, + 
receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +-------- 2022-05-31 KubeInfo flatting table -------- +CREATE TABLE cmoa_configmap_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + kind_status varchar(50), + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + binaryData text, + data text, + immutable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +----------------------- +CREATE TABLE 
cmoa_cronjob_active( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_active_apiVersion text, + status_active_fieldPath text, + status_active_kind text, + status_active_name text, + status_active_namespace text, + status_active_resourceVersion text, + status_active_uid text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_cronjob_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_failedJobsHistoryLimit text, + spec_schedule text, + spec_successfulJobsHistoryLimit text, + spec_suspend text, + status_lastScheduleTime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_daemonset_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + status_currentNumberScheduled text, + status_desiredNumberScheduled text, + status_numberAvailable text, + status_numberMisscheduled text, + status_numberReady text, + status_numberUnavailable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_deployment_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid 
varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_replicas text, + spec_template_spec_containers_image text, + status_availableReplicas text, + status_readyReplicas text, + status_replicas text, + status_unavailableReplicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_addresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_addresses_ip text, + subset_addresses_hostname text, + subset_addresses_nodeName text, + subset_addresses_targetRef text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_notreadyaddresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_notreadyaddresses_ip text, + subset_notreadyaddresses_hostname text, + subset_notreadyaddresses_nodename text, + subset_notreadyaddresses_targetref text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE 
cmoa_endpoint_ports( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_ports_port text, + subset_ports_appprotocol text, + subset_ports_name text, + subset_ports_protocol text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_event_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + action text, + count text, + eventtime text, + firsttimestamp text, + involvedobject_apiversion text, + involvedobject_fieldpath text, + involvedobject_kind text, + involvedobject_name text, + involvedobject_namespace text, + involvedobject_resourceversion text, + involvedobject_uid text, + lasttimestamp text, + message text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + reason text, + related_apiversion text, + related_fieldpath text, + related_kind text, + related_name text, + related_namespace text, + related_resourceversion text, + related_uid text, + series_count text, + series_lastobservedtime text, + series_state text, + source_component text, + source_host text, + type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + 
metadata_resourceversion text, + spec_backofflimit text, + spec_completions text, + spec_parallelism text, + status_active text, + status_completiontime text, + status_failed text, + status_starttime text, + status_succeeded text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_template ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_template_spec_containers_args text, + spec_template_spec_containers_command text, + spec_template_spec_containers_image text, + spec_template_spec_containers_name text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_namespace_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + spec_finalizers text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_annotations text, + spec_podcidr text, + spec_taints text, + status_capacity_cpu text, + status_capacity_ephemeral_storage text, + status_capacity_hugepages_1gi text, + status_capacity_hugepages_2mi text, + status_capacity_memory text, + status_capacity_pods 
text, + status_allocatable_cpu text, + status_allocatable_ephemeral_storage text, + status_allocatable_hugepages_1gi text, + status_allocatable_hugepages_2mi text, + status_allocatable_memory text, + status_allocatable_pods text, + status_addresses text, + status_daemonendpoints_kubeletendpoint_port text, + status_nodeinfo_machineid text, + status_nodeinfo_systemuuid text, + status_nodeinfo_bootid text, + status_nodeinfo_kernelversion text, + status_nodeinfo_osimage text, + status_nodeinfo_containerruntimeversion text, + status_nodeinfo_kubeletversion text, + status_nodeinfo_kubeproxyversion text, + status_nodeinfo_operatingsystem text, + status_nodeinfo_architecture text, + status_volumesinuse text, + status_volumesattached text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_condition ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lastheartbeattime text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_image ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_images_names text, + status_images_sizebytes text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolume_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp 
varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_awselasticblockstore text, + spec_azuredisk text, + spec_azurefile text, + spec_capacity text, + spec_claimref_apiversion text, + spec_claimref_fieldpath text, + spec_claimref_kind text, + spec_claimref_name text, + spec_claimref_namespace text, + spec_claimref_resourceversion text, + spec_claimref_uid text, + spec_csi text, + spec_fc text, + spec_flexvolume text, + spec_flocker text, + spec_gcepersistentdisk text, + spec_glusterfs text, + spec_hostpath text, + spec_iscsi text, + spec_local text, + spec_nfs text, + spec_persistentvolumereclaimpolicy text, + spec_photonpersistentdisk text, + spec_portworxvolume text, + spec_quobyte text, + spec_rbd text, + spec_scaleio text, + spec_storageclassname text, + spec_storageos text, + spec_volumemode text, + spec_vspherevolume text, + status_message text, + status_phase text, + status_reason text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolumeclaim_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_storageclassname text, + spec_volumemode text, + spec_volumename text, + status_accessmodes text, + status_capacity text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + kind_status varchar(50), + 
metadata_uid varchar(40), + row_index int, + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_generatename text, + metadata_namespace text, + metadata_deletiontimestamp text, + metadata_deletiongraceperiodseconds text, + metadata_labels text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + spec_hostnetwork text, + spec_priorityclassname text, + spec_enableservicelinks text, + spec_priority text, + spec_schedulername text, + spec_hostpid text, + spec_nodename text, + spec_serviceaccount text, + spec_serviceaccountname text, + spec_dnspolicy text, + spec_terminationgraceperiodseconds text, + spec_restartpolicy text, + spec_securitycontext text, + spec_nodeselector_kubernetes_io_hostname text, + spec_tolerations text, + status_phase text, + status_hostip text, + status_podip text, + status_starttime text, + status_qosclass text, + status_reason text, + status_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_conditions ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + status_conditions_lastprobetime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containerstatuses ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_containerstatuses_name text, + status_containerstatuses_ready text, + 
status_containerstatuses_restartcount text, + status_containerstatuses_image text, + status_containerstatuses_imageid text, + status_containerstatuses_containerid text, + status_containerstatuses_state_terminated_exitcode text, + status_containerstatuses_state_terminated_reason text, + status_containerstatuses_state_terminated_startedat text, + status_containerstatuses_state_terminated_finishedat text, + status_containerstatuses_state_terminated_containerid text, + status_containerstatuses_state_waiting_reason text, + status_containerstatuses_state_waiting_message text, + status_containerstatuses_state_running_startedat text, + status_containerstatuses_laststate_terminated_exitcode text, + status_containerstatuses_laststate_terminated_reason text, + status_containerstatuses_laststate_terminated_startedat text, + status_containerstatuses_laststate_terminated_finishedat text, + status_containerstatuses_laststate_terminated_containerid text, + status_containerstatuses_laststate_waiting_reason text, + status_containerstatuses_laststate_waiting_message text, + status_containerstatuses_laststate_running_startedat text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containers ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_containers_name text, + spec_containers_image text, + spec_containers_env text, + spec_containers_resources_limits_cpu text, + spec_containers_resources_limits_memory text, + spec_containers_resources_requests_cpu text, + spec_containers_resources_requests_memory text, + spec_containers_volumemounts text, + spec_containers_securitycontext_privileged text, + spec_containers_command text, + spec_containers_ports text, + spec_containers_args text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); 
+---------------------------- +CREATE TABLE cmoa_pod_volume ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_volumes_name text, + spec_volumes_hostpath text, + spec_volumes_secret text, + spec_volumes_configmap text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_replicaset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_availablereplicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_hard text, + spec_scopes text, + status_hard text, + status_used text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_scopeselector ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_scopeselector_matchexpressions_operator text, + spec_scopeselector_matchexpressions_scopename text, + spec_scopeselector_matchexpressions_values text, + create_time timestamp 
default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_deletiongraceperiodseconds text, + metadata_deletiontimestamp text, + metadata_labels text, + metadata_namespace text, + spec_clusterip text, + spec_externalips text, + spec_selector text, + spec_type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_ports ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_ports_appprotocol text, + spec_ports_name text, + spec_ports_nodeport text, + spec_ports_port text, + spec_ports_protocol text, + spec_ports_targetport text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_statefulset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); + +CREATE TABLE public.api_error_history ( + id int8 NOT NULL, + 
api_msg varchar(255) NULL, + code varchar(255) NULL, + "exception" varchar(255) NULL, + http_error varchar(255) NULL, + http_status int4 NULL, + occureence_time varchar(255) NULL, + params varchar(255) NULL, + "path" varchar(255) NULL, + "type" varchar(255) NULL, + CONSTRAINT api_error_history_pkey PRIMARY KEY (id) +); + +CREATE TABLE public.metric_score ( + clst_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + metric_id varchar(255) NOT NULL, + sub_key varchar(255) NOT NULL, + unixtime int4 NOT NULL, + anomaly bool NOT NULL, + cont_name varchar(255) NULL, + "instance" varchar(255) NULL, + "namespace" varchar(255) NULL, + node_id varchar(255) NULL, + pod_id varchar(255) NULL, + score int4 NOT NULL, + yhat_lower_upper json NULL, + CONSTRAINT metric_score_pkey PRIMARY KEY (clst_id, entity_id, entity_type, metric_id, sub_key, unixtime) +); + + +CREATE TABLE public.tenant_info_auth_resources ( + tenant_info_id varchar(255) NOT NULL, + auth_resources_id int8 NOT NULL, + CONSTRAINT tenant_info_auth_resources_pkey PRIMARY KEY (tenant_info_id, auth_resources_id), + CONSTRAINT uk_7s6l8e2c8gli4js43c4xoifcl UNIQUE (auth_resources_id) +); + + +-- public.tenant_info_auth_resources foreign keys + +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkkecsc13ydhwg8u05aumkqbnx1 FOREIGN KEY (tenant_info_id) REFERENCES public.tenant_info(id); +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkpvvec4ju3hsma6s1rtgvr4mf6 FOREIGN KEY (auth_resources_id) REFERENCES public.auth_resource3(id); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql new file mode 100644 index 0000000..e6335f3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql @@ -0,0 +1,2380 @@ +INSERT INTO public.tenant_info (id, 
name, in_used, created_date, modified_date, contract_id) VALUES ('DEFAULT_TENANT', 'admin', true, now(), now(), 0); + +INSERT INTO public.auth_resource2 (id, access_type, name, parent_id, type) VALUES (-1, 4, 'null', NULL, 'null'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Infrastructure', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Workloads', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Services', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Diagnosis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Statistics & Analysis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Reports', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Settings', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Hosts', -1, 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Dashboards', -1 , 'menu'); +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Health Check', -1, 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Namespace', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Nodes', (select id 
from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Node Details', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Usage', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Persistent Volume', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Pods', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Cron Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Deploy List', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Structure', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from 
auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Troubleshooting', (select id from auth_resource2 where type='menu' and name='Diagnosis') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Performance Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Job History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Log Viewer', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, 
parent_id, type) VALUES (4, 'Event Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert Analysis', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Container Life Cycle', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Traces', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Used Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'User & Group', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alerts', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'General', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT 
INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Metric Meta', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Notification', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Host Alerts', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'License', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Agent', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alias', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 
'Detail', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Group', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'CloudMOA - Nodes Resource', NULL, 'dashboard'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Detail', NULL, 'dashboard'); + +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES(4, 'Check Script', (select id from auth_resource2 where type='menu' and name='Health Check'), 'menu'); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards', false, null); +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Overview', false, 
null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Namespace', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Nodes', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Node Details', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Resource Usage', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Pods', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Jobs', false, null); +-- NSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Cron Jobs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Deploy List', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Structure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis|Anomaly Score', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Performance Trends', false, 
null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Anomaly Score', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Job History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Log Viewer', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Event Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Container Life Cycle', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Service Traces', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|User & Group', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|General', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Metric Meta', false, null); +INSERT INTO 
public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Notification', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Host Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|License', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alias', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent Installation', false, NULL); + + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Group', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check|Check Script', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('userGroup|admin|default', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin|owner', false, 'DEFAULT_TENANT'); + +INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, 
user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('admin', NULL, true, NULL, 'admin', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin')); +INSERT INTO public.cloud_group (id, created_date, modified_date, name, description) VALUES ((select id from auth_resource3 where name='userGroup|admin|default'), now(), now(), 'default', '기본그룹정의'); + +--INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('owner', NULL, false, NULL, 'owner', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin|owner')); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +--INSERT INTO public.cloud_user_setting +--(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +--VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|CloudMOA - Nodes Resource', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|Service Detail', 
false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('cluster|cloudmoa', false, 'DEFAULT_TENANT'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (select id from auth_resource3 where name='menu|Infrastructure'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (select id from auth_resource3 where name='menu|Infrastructure|Topology'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (select id from auth_resource3 where name='menu|Infrastructure|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (select id from auth_resource3 where name='menu|Infrastructure|Resource Usage'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (select id from auth_resource3 where name='menu|Infrastructure|Namespace'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3); 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', 
(select id from auth_resource3 where name='menu|Services|Structure'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (30, 'Diagnosis', '05.Diagnosis', 4, NULL, (select id from auth_resource3 where name='menu|Diagnosis'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (31, 'Anomaly Score Detail', NULL, 0, 'anomalyScoreDiagnosis', (select id from auth_resource3 where name='menu|Diagnosis|Anomaly Score'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert 
Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where 
name='menu|Statistics & Analysis|Service Traces'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (62, 'Templates', NULL, 1, 'templateReport', (select id from auth_resource3 where name='menu|Reports|Templates'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2); + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (80, 'Hosts', '12.Hosts', 10, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (82, 'Overview', NULL, 1, 'overviewHost', 
(select id from auth_resource3 where name='menu|Hosts|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (90, 'Settings', '08.Setting', 99, NULL, (select id from auth_resource3 where name='menu|Settings'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', (select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (96, 'Metric Meta', NULL, 5, 
'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (99, 'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2); + +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0); + +--INSERT 
INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Resource Usage'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Namespace'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Nodes'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Node Details'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Deploy List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 
WHERE NAME = 'menu|Workloads|Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Cron Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Pods'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Structure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Detail'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis|Anomaly Score'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Statistics & Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Performance Trends'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Anomaly Score'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Job History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Log Viewer'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Event Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Container Life Cycle'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Service Traces'), 'owner'); +-- +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Templates'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Templates'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|User & Group'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alerts'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), 
now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Metric Meta'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|General'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Notification'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alias'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|License'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent Installation'), 'owner'); + +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cadvisor_version_info', 'cadvisor', 'A metric with a constant ''1'' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_periods_total', 'cadvisor', 'Number of elapsed enforcement period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_periods_total', 'cadvisor', 'Number of throttled period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_seconds_total', 'cadvisor', 'Total time duration the container has been throttled.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_load_average_10s', 'cadvisor', 'Value of container cpu load average over the last 10 seconds.', 'CPU', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_periods_total', 'cadvisor', 'Number of times processes of the cgroup have run on the cpu', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_seconds_total', 'cadvisor', 'Time duration the processes of the container have run on the CPU.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_runqueue_seconds_total', 'cadvisor', 'Time duration processes of the container have been waiting on a runqueue.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_system_seconds_total', 'cadvisor', 'Cumulative system cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_usage_seconds_total', 'cadvisor', 'Cumulative cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_user_seconds_total', 'cadvisor', 'Cumulative user cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_limit_bytes', 'cadvisor', 'Number of bytes that can be consumed by the container on this filesystem.', NULL, NULL, 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_last_seen', 'cadvisor', 'Last time a container was seen by the exporter', NULL, NULL, 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_bytes_total', 'cadvisor', 'Cumulative count of bytes received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while receiving', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_total', 'cadvisor', 'Cumulative count of packets received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_bytes_total', 'cadvisor', 'Cumulative count of bytes transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_errors_total', 'cadvisor', 'Cumulative count of errors encountered while transmitting', 'NIC', 'LOAD', 'Container', 
'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_total', 'cadvisor', 'Cumulative count of packets transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_scrape_error', 'cadvisor', '1 if there was an error while getting container metrics, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_period', 'cadvisor', 'CPU period of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_quota', 'cadvisor', 'CPU quota of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('container_memory_cache', 'cadvisor', 'Number of bytes of page cache memory.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failcnt', 'cadvisor', 'Number of memory usage hits limits', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failures_total', 'cadvisor', 'Cumulative count of memory allocation failures.', 'Memory', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_max_usage_bytes', 'cadvisor', 'Maximum memory usage recorded in bytes', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_rss', 'cadvisor', 'Size of RSS in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_swap', 'cadvisor', 'Container swap usage in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_usage_bytes', 'cadvisor', 'Current memory usage in bytes, including all memory regardless of when it was accessed', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_working_set_bytes', 'cadvisor', 'Current working set in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_tcp_usage_total', 'cadvisor', 'tcp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'tcp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_udp_usage_total', 'cadvisor', 'udp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'udp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_shares', 'cadvisor', 'CPU share of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_spec_memory_limit_bytes', 'cadvisor', 'Memory limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_swap_limit_bytes', 'cadvisor', 'Memory swap limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_start_time_seconds', 'cadvisor', 'Start time of the container since unix epoch in seconds.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_tasks_state', 'cadvisor', 'Number of tasks in given state', NULL, NULL, 'Container', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds', 'prometheus', 'The HTTP request latencies in microseconds.', NULL, 'DURATION', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_requests_total', 'prometheus', 'Total number of scrapes by HTTP status code.', NULL, 'ERROR', 'Node', 'counter', 'code,method', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_bytes_average', 'cloudwatch', 'Bytes read from all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds', 'micrometer', 'Server Response in second', NULL, 'RATE', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_count', 'micrometer', 'the total number of requests.', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_sum', 'micrometer', 'the total time taken to serve the requests', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_max', 'micrometer', 'the max number of requests.', NULL, 'RATE', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_bytes_average', 'cloudwatch', 'Bytes written to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', 
'2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_loaded', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_unloaded_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_live_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_max_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_allocated_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_promoted_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_count', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_max', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_sum', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_arp_entries', 'node_exporter', 'ARP entries by device', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_boot_time_seconds', 'node_exporter', 'Node boot time, in unixtime.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_context_switches_total', 'node_exporter', 'Total number of context switches.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_core_throttles_total', 'node_exporter', 'Number of times this cpu core has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'core', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_hertz', 'node_exporter', 'Current cpu thread frequency in hertz.', 'CPU', 'LOAD', 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_max_hertz', 'node_exporter', 'Maximum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_min_hertz', 'node_exporter', 'Minimum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_guest_seconds_total', 'node_exporter', 'Seconds the cpus spent in guests (VMs) for each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu', '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_package_throttles_total', 'node_exporter', 'Number of times this cpu package has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'package', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_seconds_total', 'node_exporter', 'Seconds the cpus spent in each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu,mode', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_entropy_available_bits', 'node_exporter', 'Bits of available entropy.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_exporter_build_info', 'node_exporter', 'A metric with a constant ''1'' value labeled by version, revision, branch, and goversion from which node_exporter was built.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_cpuutilization_average', 'cloudwatch', 'The percentage of allocated EC2 compute units that are currently in use on the instance.', 'CPU', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_ops_average', 'cloudwatch', 'Completed read operations from all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_ops_average', 'cloudwatch', 'Completed write operations to all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_bytes_average', 'cloudwatch', 'Bytes read from all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_bytes_average', 'cloudwatch', 'Bytes written to all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_in_average', 'cloudwatch', 'The number of bytes received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 
15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_out_average', 'cloudwatch', 'The number of bytes sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_readonly', 'node_exporter', 'Filesystem read-only status.', NULL, NULL, 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_in_average', 'cloudwatch', 'The number of packets received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_forks_total', 'node_exporter', 'Total number of forks.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_chip_names', 'node_exporter', 'Annotation metric for human-readable chip names', 'CPU', 'LOAD', 'Node', 'gauge', 'chip', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_hwmon_fan_rpm', 'node_exporter', 'Hardware monitor for fan revolutions per minute (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_pwm', 'node_exporter', 'Hardware monitor pwm element ', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_sensor_label', 'node_exporter', 'Label for given chip and sensor', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_celsius', 'node_exporter', 'Hardware monitor for temperature (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_alarm_celsius', 'node_exporter', 'Hardware monitor for temperature (crit_alarm)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_celsius', 'node_exporter', 'Hardware monitor for temperature (crit)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_max_celsius', 'node_exporter', 'Hardware monitor for temperature (max)', NULL, NULL, 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_intr_total', 'node_exporter', 'Total number of interrupts serviced.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_out_average', 'cloudwatch', 'The number of packets sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_ops_average', 'cloudwatch', 'Completed read operations from all Amazon EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_ops_average', 'cloudwatch', 'Completed write operations to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load1', 'node_exporter', '1m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load15', 'node_exporter', '15m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load5', 'node_exporter', '5m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_completed_total', 'node_exporter', 'The total number of reads completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_merged_total', 'node_exporter', 'The total number of reads merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_write_time_seconds_total', 'node_exporter', 'This is the total number of seconds spent by all writes.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_completed_total', 'node_exporter', 'The total number of writes completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_merged_total', 'node_exporter', 'The number of writes merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_written_bytes_total', 'node_exporter', 'The total number of bytes written successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries', 'node_exporter', 'Number of currently allocated flow entries for connection tracking.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries_limit', 'node_exporter', 'Maximum size of connection tracking table.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_scrape_collector_duration_seconds', 'node_exporter', 'node_exporter: Duration of a collector scrape.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_success', 'node_exporter', 'node_exporter: Whether a collector succeeded.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_textfile_scrape_error', 'node_exporter', '1 if there was an error opening or reading a file, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_time_seconds', 'node_exporter', 'System time in seconds since epoch (1970).', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_estimated_error_seconds', 'node_exporter', 'Estimated error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_frequency_adjustment_ratio', 'node_exporter', 'Local clock frequency adjustment.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_loop_time_constant', 'node_exporter', 'Phase-locked loop time constant.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_maxerror_seconds', 'node_exporter', 'Maximum error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_offset_seconds', 'node_exporter', 'Time offset in between local system and reference clock.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_calibration_total', 'node_exporter', 'Pulse per second count of calibration intervals.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_error_total', 'node_exporter', 'Pulse per second count of calibration errors.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_frequency_hertz', 'node_exporter', 'Pulse per second 
frequency.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_seconds', 'node_exporter', 'Pulse per second jitter.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_total', 'node_exporter', 'Pulse per second count of jitter limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_shift_seconds', 'node_exporter', 'Pulse per second interval duration.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_exceeded_total', 'node_exporter', 'Pulse per second count of stability limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_hertz', 'node_exporter', 'Pulse per second stability, average of recent frequency changes.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, 
diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_status', 'node_exporter', 'Value of the status array bits.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_sync_status', 'node_exporter', 'Is clock synchronized to a reliable server (1 = yes, 0 = no).', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tai_offset_seconds', 'node_exporter', 'International Atomic Time (TAI) offset.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tick_seconds', 'node_exporter', 'Seconds between clock ticks.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_uname_info', 'node_exporter', 'Labeled system information as provided by the uname system call.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_oom_kill', 'node_exporter', '/proc/vmstat information field oom_kill.', NULL, 'ERROR', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_cpu_usage', 'micrometer', 'The "recent cpu usage" for the Java Virtual Machine process', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_uptime_seconds', 'micrometer', 'Process uptime in seconds.', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_count', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_max', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_sum', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_cpu_usage', 'micrometer', 'The "recent cpu usage" for the whole system', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_load_average_1m', 'micrometer', 'The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('up', 'prometheus', '1 if the instance is healthy, i.e. 
reachable, or 0 if the scrape failed.', NULL, 'ERROR', 'Any', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('go_threads', 'prometheus', 'Number of OS threads created.', 'Thread', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes', 'prometheus', 'The HTTP request sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes', 'prometheus', 'The HTTP response sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_count', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_total_capacity_bytes', 
'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_committed_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_max_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_daemon', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_live', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_peak', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_now', 'node_exporter', 'The number of I/Os currently in progress.', 'Disk', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_seconds_total', 'node_exporter', 'Total seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_weighted_seconds_total', 'node_exporter', 'The weighted # of seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_bytes_total', 'node_exporter', 'The total number of bytes read successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_time_seconds_total', 'node_exporter', 'The total number of seconds spent by all reads.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_filefd_allocated', 'node_exporter', 'File descriptor statistics: allocated.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_maximum', 'node_exporter', 'File descriptor statistics: maximum.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_connections_total', 'node_exporter', 'The total number of connections made.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_bytes_total', 'node_exporter', 'The total amount of incoming data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_packets_total', 'node_exporter', 'The total number of incoming packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_bytes_total', 'node_exporter', 'The total amount of outgoing data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_packets_total', 'node_exporter', 'The total number of outgoing packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_anon_bytes', 'node_exporter', 'Memory information field Active_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_bytes', 'node_exporter', 'Memory information field Active_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_file_bytes', 'node_exporter', 'Memory information field Active_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonHugePages_bytes', 'node_exporter', 'Memory information field AnonHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonPages_bytes', 'node_exporter', 'Memory information 
field AnonPages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Bounce_bytes', 'node_exporter', 'Memory information field Bounce_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Buffers_bytes', 'node_exporter', 'Memory information field Buffers_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Cached_bytes', 'node_exporter', 'Memory information field Cached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaFree_bytes', 'node_exporter', 'Memory information field CmaFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaTotal_bytes', 'node_exporter', 'Memory information field CmaTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_memory_CommitLimit_bytes', 'node_exporter', 'Memory information field CommitLimit_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Committed_AS_bytes', 'node_exporter', 'Memory information field Committed_AS_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap1G_bytes', 'node_exporter', 'Memory information field DirectMap1G_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap2M_bytes', 'node_exporter', 'Memory information field DirectMap2M_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap4k_bytes', 'node_exporter', 'Memory information field DirectMap4k_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Dirty_bytes', 'node_exporter', 'Memory information field Dirty_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, 
'2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HardwareCorrupted_bytes', 'node_exporter', 'Memory information field HardwareCorrupted_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Free', 'node_exporter', 'Memory information field HugePages_Free.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Rsvd', 'node_exporter', 'Memory information field HugePages_Rsvd.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Surp', 'node_exporter', 'Memory information field HugePages_Surp.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Total', 'node_exporter', 'Memory information field HugePages_Total.', 'Memory', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_memory_Hugepagesize_bytes', 'node_exporter', 'Memory information field Hugepagesize_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_anon_bytes', 'node_exporter', 'Memory information field Inactive_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_bytes', 'node_exporter', 'Memory information field Inactive_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_file_bytes', 'node_exporter', 'Memory information field Inactive_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_KernelStack_bytes', 'node_exporter', 'Memory information field KernelStack_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mapped_bytes', 'node_exporter', 'Memory information field Mapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemAvailable_bytes', 'node_exporter', 'Memory information field MemAvailable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemFree_bytes', 'node_exporter', 'Memory information field MemFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemTotal_bytes', 'node_exporter', 'Memory information field MemTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mlocked_bytes', 'node_exporter', 'Memory information field Mlocked_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_NFS_Unstable_bytes', 'node_exporter', 'Memory information field NFS_Unstable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_memory_PageTables_bytes', 'node_exporter', 'Memory information field PageTables_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Shmem_bytes', 'node_exporter', 'Memory information field Shmem_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemHugePages_bytes', 'node_exporter', 'Memory information field ShmemHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemPmdMapped_bytes', 'node_exporter', 'Memory information field ShmemPmdMapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Slab_bytes', 'node_exporter', 'Memory information field Slab_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SReclaimable_bytes', 'node_exporter', 'Memory information field SReclaimable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SUnreclaim_bytes', 'node_exporter', 'Memory information field SUnreclaim_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapCached_bytes', 'node_exporter', 'Memory information field SwapCached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapFree_bytes', 'node_exporter', 'Memory information field SwapFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapTotal_bytes', 'node_exporter', 'Memory information field SwapTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Unevictable_bytes', 'node_exporter', 'Memory information field Unevictable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocChunk_bytes', 'node_exporter', 
'Memory information field VmallocChunk_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocTotal_bytes', 'node_exporter', 'Memory information field VmallocTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocUsed_bytes', 'node_exporter', 'Memory information field VmallocUsed_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Writeback_bytes', 'node_exporter', 'Memory information field Writeback_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_WritebackTmp_bytes', 'node_exporter', 'Memory information field WritebackTmp_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InErrors', 'node_exporter', 'Statistic IcmpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InMsgs', 'node_exporter', 'Statistic IcmpInMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_OutMsgs', 'node_exporter', 'Statistic IcmpOutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InErrors', 'node_exporter', 'Statistic Icmp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InMsgs', 'node_exporter', 'Statistic Icmp6InMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_OutMsgs', 'node_exporter', 'Statistic Icmp6OutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip_Forwarding', 'node_exporter', 'Statistic IpForwarding.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_InOctets', 'node_exporter', 'Statistic Ip6InOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_OutOctets', 'node_exporter', 'Statistic Ip6OutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_InOctets', 'node_exporter', 'Statistic IpExtInOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_OutOctets', 'node_exporter', 'Statistic IpExtOutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_ActiveOpens', 'node_exporter', 'Statistic TcpActiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_CurrEstab', 'node_exporter', 'Statistic TcpCurrEstab.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_InErrs', 'node_exporter', 'Statistic TcpInErrs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_PassiveOpens', 'node_exporter', 'Statistic TcpPassiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_RetransSegs', 'node_exporter', 'Statistic TcpRetransSegs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenDrops', 'node_exporter', 'Statistic TcpExtListenDrops.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenOverflows', 'node_exporter', 'Statistic TcpExtListenOverflows.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesFailed', 
'node_exporter', 'Statistic TcpExtSyncookiesFailed.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesRecv', 'node_exporter', 'Statistic TcpExtSyncookiesRecv.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesSent', 'node_exporter', 'Statistic TcpExtSyncookiesSent.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InDatagrams', 'node_exporter', 'Statistic UdpInDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InErrors', 'node_exporter', 'Statistic UdpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_NoPorts', 'node_exporter', 'Statistic UdpNoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_OutDatagrams', 'node_exporter', 'Statistic UdpOutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InDatagrams', 'node_exporter', 'Statistic Udp6InDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InErrors', 'node_exporter', 'Statistic Udp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_NoPorts', 'node_exporter', 'Statistic Udp6NoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_OutDatagrams', 'node_exporter', 'Statistic Udp6OutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite_InErrors', 'node_exporter', 'Statistic UdpLiteInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite6_InErrors', 'node_exporter', 'Statistic UdpLite6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_bytes_total', 'node_exporter', 'Network device statistic receive_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_compressed_total', 'node_exporter', 'Network device statistic receive_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_drop_total', 'node_exporter', 'Network device statistic receive_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_errs_total', 'node_exporter', 'Network device statistic receive_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_fifo_total', 
'node_exporter', 'Network device statistic receive_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_frame_total', 'node_exporter', 'Network device statistic receive_frame.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_multicast_total', 'node_exporter', 'Network device statistic receive_multicast.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_packets_total', 'node_exporter', 'Network device statistic receive_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_bytes_total', 'node_exporter', 'Network device statistic transmit_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_carrier_total', 'node_exporter', 'Network device statistic transmit_carrier.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_colls_total', 'node_exporter', 'Network device statistic transmit_colls.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_compressed_total', 'node_exporter', 'Network device statistic transmit_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_drop_total', 'node_exporter', 'Network device statistic transmit_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_errs_total', 'node_exporter', 'Network device statistic transmit_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_fifo_total', 'node_exporter', 'Network device statistic transmit_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('node_network_transmit_packets_total', 'node_exporter', 'Network device statistic transmit_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_blocked', 'node_exporter', 'Number of processes blocked waiting for I/O to complete.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_running', 'node_exporter', 'Number of processes in runnable state.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_inuse', 'node_exporter', 'Number of FRAG sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_memory', 'node_exporter', 'Number of FRAG sockets in state memory.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_RAW_inuse', 'node_exporter', 'Number of RAW sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_sockets_used', 'node_exporter', 'Number of sockets sockets in state used.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_alloc', 'node_exporter', 'Number of TCP sockets in state alloc.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_inuse', 'node_exporter', 'Number of TCP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem', 'node_exporter', 'Number of TCP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem_bytes', 'node_exporter', 'Number of TCP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_orphan', 'node_exporter', 
'Number of TCP sockets in state orphan.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_tw', 'node_exporter', 'Number of TCP sockets in state tw.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_inuse', 'node_exporter', 'Number of UDP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem', 'node_exporter', 'Number of UDP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem_bytes', 'node_exporter', 'Number of UDP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDPLITE_inuse', 'node_exporter', 'Number of UDPLITE sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_tcp_connection_states', 'node_exporter', 'Number of connection states.', 'Network', 'LOAD', 'Node', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgfault', 'node_exporter', '/proc/vmstat information field pgfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgmajfault', 'node_exporter', '/proc/vmstat information field pgmajfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgin', 'node_exporter', '/proc/vmstat information field pgpgin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgout', 'node_exporter', '/proc/vmstat information field pgpgout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpin', 'node_exporter', '/proc/vmstat information field pswpin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpout', 'node_exporter', '/proc/vmstat information field pswpout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_files_open', 'micrometer', 'The open file descriptor count', 'File', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_open_fds', 'micrometer', 'Number of open file descriptors.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_resident_memory_bytes', 'micrometer', 'Resident memory size in bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_virtual_memory_bytes', 'micrometer', '-', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_free', 'cadvisor', 'Number of available Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_total', 'cadvisor', 'Number of Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_current', 'cadvisor', 'Number of I/Os currently in progress', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_seconds_total', 'cadvisor', 'Cumulative count of seconds spent doing I/Os', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_weighted_seconds_total', 'cadvisor', 'Cumulative weighted I/O time in seconds', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_read_seconds_total', 'cadvisor', 'Cumulative count of seconds spent reading', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('container_fs_reads_bytes_total', 'cadvisor', 'Cumulative count of bytes read', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_merged_total', 'cadvisor', 'Cumulative count of reads merged', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_total', 'cadvisor', 'Cumulative count of reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_reads_total', 'cadvisor', 'Cumulative count of sector reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_writes_total', 'cadvisor', 'Cumulative count of sector writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_usage_bytes', 'cadvisor', 'Number of bytes that are consumed by the container on this filesystem.', 'Filesystem', 'LOAD', 'Container', 'gauge', 
'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_write_seconds_total', 'cadvisor', 'Cumulative count of seconds spent writing', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_bytes_total', 'cadvisor', 'Cumulative count of bytes written', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_total', 'cadvisor', 'Cumulative count of writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_avail_bytes', 'node_exporter', 'Filesystem space available to non-root users in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_device_error', 'node_exporter', 'Whether an error occurred while getting statistics for the given device.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files', 'node_exporter', 'Filesystem total file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files_free', 'node_exporter', 'Filesystem total free file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_free_bytes', 'node_exporter', 'Filesystem free space in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_size_bytes', 'node_exporter', 'Filesystem size in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hitrate', 'cassandra_exporter', 'All time cache hit rate', 'Cache', 'LOAD', 'Cassandra', 'gauge', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hits_count', 'cassandra_exporter', 'Total number of cache hits', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', 
'2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_requests_count', 'cassandra_exporter', 'Total number of cache requests', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_client_connectednativeclients', 'cassandra_exporter', 'Number of clients connected to this nodes native protocol server', 'Connection', 'LOAD', 'Cassandra', 'gauge', NULL, '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_failures_count', 'cassandra_exporter', 'Number of transaction failures encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_latency_seconds_count', 'cassandra_exporter', 'Number of client requests latency seconds', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_timeouts_count', 'cassandra_exporter', 'Number of timeouts encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_unavailables_count', 'cassandra_exporter', 'Number of unavailable exceptions encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_completedtasks', 'cassandra_exporter', 'Total number of commit log messages written', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_totalcommitlogsize', 'cassandra_exporter', 'Current size, in bytes, used by all the commit log segments', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds', 'cassandra_exporter', 'Local range scan latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds_count', 'cassandra_exporter', 'Local range scan count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('cassandra_keyspace_readlatency_seconds', 'cassandra_exporter', 'Local read latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds_count', 'cassandra_exporter', 'Local read count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused', 'cassandra_exporter', 'Total disk space used belonging to this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds', 'cassandra_exporter', 'Local write latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds_count', 'cassandra_exporter', 'Local write count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_activetasks', 'cassandra_exporter', 'Number of tasks being actively worked on', 'Task', 'LOAD', 
'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_completedtasks', 'cassandra_exporter', 'Number of tasks completed', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_pendingtasks', 'cassandra_exporter', 'Number of queued tasks queued up', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_totalblockedtasks_count', 'cassandra_exporter', 'Number of tasks that were blocked due to queue saturation', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cloudwatch_requests_total', 'cloudwatch', 'API requests made to CloudWatch', 'API', 'LOAD', 'AWS/Usage', 'counter', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_count', 'imxc_api_server', 'the number of error counts in 5s', NULL, 'ERROR', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_total', 'imxc_api_server', 'the total number of errors', NULL, 'ERROR', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_request_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_count', 'imxc_api_server', 'the number of requests counts in 5s', NULL, 'LOAD', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'gauge', 'protocol', '2019-12-10 11:22:00', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_total', 'imxc_api_server', 'the total number of requests', NULL, 'LOAD', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_connections', 'mongodb_exporter', 'The number of incoming connections from clients to the database server', 'Connection', 'LOAD', 'MongoDB', 'gauge', 'state', '2019-12-04 
16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_client', 'mongodb_exporter', 'The number of the active client connections performing read or write operations', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_current_queue', 'mongodb_exporter', 'The number of operations that are currently queued and waiting for the read or write lock', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_instance_uptime_seconds', 'mongodb_exporter', 'The number of seconds that the current MongoDB process has been active', 'Server', 'DURATION', 'MongoDB', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_memory', 'mongodb_exporter', 'The amount of memory, in mebibyte (MiB), currently used by the database process', 'Memory', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_metrics_document_total', 'mongodb_exporter', 'The total number of documents processed', 'Row', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_network_bytes_total', 'mongodb_exporter', 'The number of bytes that reflects the amount of network traffic', 'Network', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_op_counters_total', 'mongodb_exporter', 'The total number of operations since the mongod instance last started', 'Request', 'LOAD', 'MongoDB', 'counter', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_aborted_connects', 'mysqld_exporter', 'The number of failed attempts to connect to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_received', 'mysqld_exporter', 'The number of bytes received from all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_sent', 'mysqld_exporter', 'The number of bytes sent to all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_commands_total', 
'mysqld_exporter', 'The number of times each XXX command has been executed', 'Request', 'LOAD', 'MySQL', 'counter', 'command', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_connections', 'mysqld_exporter', 'The number of connection attempts (successful or not) to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests', 'mysqld_exporter', 'The number of logical read requests', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests', 'mysqld_exporter', 'The number of writes done to the InnoDB buffer pool', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_read', 'mysqld_exporter', 'The amount of data read since the server was started (in bytes)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_reads', 'mysqld_exporter', 'The total number of data reads (OS file reads)', 'Disk', 'LOAD', 'MySQL', 
'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_writes', 'mysqld_exporter', 'The total number of data writes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_written', 'mysqld_exporter', 'The amount of data written so far, in bytes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_write_requests', 'mysqld_exporter', 'The number of write requests for the InnoDB redo log', 'Log', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_writes', 'mysqld_exporter', 'The number of physical writes to the InnoDB redo log file', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_os_log_written', 'mysqld_exporter', 'The number of bytes written to the InnoDB redo log files', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits', 'mysqld_exporter', 'The number of row locks currently being waited for by operations on InnoDB tables', 'Lock', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_time', 'mysqld_exporter', 'The total time spent in acquiring row locks for InnoDB tables, in milliseconds', 'Lock', 'DURATION', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits', 'mysqld_exporter', 'The number of times operations on InnoDB tables had to wait for a row lock', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_ops_total', 'mysqld_exporter', 'The number of rows operated in InnoDB tables', 'Row', 'LOAD', 'MySQL', 'counter', 'operation', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_table_locks_immediate', 'mysqld_exporter', 'The number of times that a request for a table lock could be granted immediately', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('mysql_global_status_threads_connected', 'mysqld_exporter', 'The number of currently open connections', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_running', 'mysqld_exporter', 'The number of threads that are not sleeping', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_uptime', 'mysqld_exporter', 'The number of seconds that the server has been up', 'Server', 'DURATION', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_up', 'mysqld_exporter', 'Whether the last scrape of metrics from MySQL was able to connect to the server', 'NULL', 'ERROR', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_locks_count', 'postgres_exporter', 'Number of locks', 'Lock', 'LOAD', 'PostgreSQL', 'gauge', 'datname,mode', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_activity_count', 'postgres_exporter', 'number of connections in this state', 'Connection', 'LOAD', 'PostgreSQL', 'gauge', 'datname,state', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_read_time', 'postgres_exporter', 'Time spent reading data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_write_time', 'postgres_exporter', 'Time spent writing data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_hit', 'postgres_exporter', 'Number of times disk blocks were found already in the buffer cache', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_read', 'postgres_exporter', 'Number of disk blocks read in this database', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_bytes', 'postgres_exporter', 'Total amount of data written to temporary files by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_files', 'postgres_exporter', 'Number of temporary files created by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_deleted', 'postgres_exporter', 'Number of rows deleted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_fetched', 'postgres_exporter', 'Number of rows fetched by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_inserted', 'postgres_exporter', 'Number of rows inserted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_returned', 'postgres_exporter', 'Number of rows returned by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('pg_stat_database_tup_updated', 'postgres_exporter', 'Number of rows updated by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_commit', 'postgres_exporter', 'Number of transactions in this database that have been committed', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_rollback', 'postgres_exporter', 'Number of transactions in this database that have been rolled back', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_up', 'postgres_exporter', 'Whether the last scrape of metrics from PostgreSQL was able to connect to the server', 'NULL', 'ERROR', 'PostgreSQL', 'gauge', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816000, '2019-08-19 06:14:22.616', '2019-08-19 06:14:22.616', false, 4, (select id from auth_resource2 where type='menu' and name='Infrastructure' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816001, '2019-08-19 06:14:22.635', '2019-08-19 06:14:22.635', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and 
name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816002, '2019-08-19 06:14:22.638', '2019-08-19 06:14:22.638', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816003, '2019-08-19 06:14:22.64', '2019-08-19 06:14:22.64', false, 4, (select id from auth_resource2 where type='menu' and name='Namespace' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816004, '2019-08-19 06:14:22.643', '2019-08-19 06:14:22.643', false, 4, (select id from auth_resource2 where type='menu' and name='Nodes' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816005, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Node Details' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816006, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Resource Usage' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816009, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Persistent Volume' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816100, 
'2019-08-19 06:14:22.619', '2019-08-19 06:14:22.619', false, 4, (select id from auth_resource2 where type='menu' and name='Workloads' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816105, '2019-08-19 06:14:22.657', '2019-08-19 06:14:22.657', false, 4, (select id from auth_resource2 where type='menu' and name='Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816106, '2019-08-19 06:14:22.66', '2019-08-19 06:14:22.66', false, 4, (select id from auth_resource2 where type='menu' and name='Cron Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816107, '2019-08-19 06:14:22.646', '2019-08-19 06:14:22.646', false, 4, (select id from auth_resource2 where type='menu' and name='Pods' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816200, '2019-08-19 06:14:22.621', '2019-08-19 06:14:22.621', false, 4, (select id from auth_resource2 where type='menu' and name='Services' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816201, '2019-08-19 06:14:22.698', '2019-08-19 06:14:22.698', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816202, '2019-08-19 06:14:22.728', '2019-08-19 06:14:22.728', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where 
type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816203, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816300, '2019-08-19 06:14:22.624', '2019-08-19 06:14:22.624', false, 4, (select id from auth_resource2 where type='menu' and name='Diagnosis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816301, '2019-08-19 06:14:22.705', '2019-08-19 06:14:22.705', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Diagnosis') ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816309, '2019-08-19 06:14:22.668', '2019-08-19 06:14:22.668', false, 4, (select id from auth_resource2 where type='menu' and name='Troubleshooting' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816400, '2019-08-19 06:14:22.627', '2019-08-19 06:14:22.627', false, 4, (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816401, '2019-08-19 06:14:22.671', '2019-08-19 06:14:22.671', false, 4, (select id from auth_resource2 where type='menu' and name='Performance Trends' ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816402, '2019-08-19 06:14:22.731', '2019-08-19 06:14:22.731', false, 4, (select id from auth_resource2 where type='menu' and name='Alert Analysis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816403, '2019-08-19 06:14:22.674', '2019-08-19 06:14:22.674', false, 4, (select id from auth_resource2 where type='menu' and name='Alert History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816404, '2019-08-19 06:14:22.677', '2019-08-19 06:14:22.677', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816405, '2019-08-19 06:14:22.679', '2019-08-19 06:14:22.679', false, 4, (select id from auth_resource2 where type='menu' and name='Job History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816406, '2019-08-19 06:14:22.685', '2019-08-19 06:14:22.685', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816407, '2019-08-19 06:14:22.682', '2019-08-19 06:14:22.682', false, 4, (select id from auth_resource2 where type='menu' and name='Log Viewer' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, 
modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816408, '2019-08-19 06:14:22.725', '2019-08-19 06:14:22.725', false, 4, (select id from auth_resource2 where type='menu' and name='Event Logs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816409, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Container Life Cycle' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816500, '2019-08-19 06:14:22.629', '2019-08-19 06:14:22.629', false, 4, (select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816501, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816502, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816550, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Dashboards' ) , 
'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816551, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816552, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816700, '2019-08-19 06:14:22.632', '2019-08-19 06:14:22.632', false, 4, (select id from auth_resource2 where type='menu' and name='Settings' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816701, '2019-08-19 06:14:22.687', '2019-08-19 06:14:22.687', false, 4, (select id from auth_resource2 where type='menu' and name='User & Group' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816702, '2019-08-19 06:14:22.69', '2019-08-19 06:14:22.69', false, 4, (select id from auth_resource2 where type='menu' and name='Alert' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816703, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Host Alerts' ) , 'admin'); +-- INSERT INTO public.user_permission2 
(id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816704, '2019-08-19 06:14:22.693', '2019-08-19 06:14:22.693', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Settings' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816706, '2019-08-19 06:14:22.717', '2019-08-19 06:14:22.717', false, 4, (select id from auth_resource2 where type='menu' and name='Metric Meta' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816707, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='Notification' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816708, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='General' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816709, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='License' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816800, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Hosts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816801, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from 
auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816802, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816803, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='List' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816804, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816805, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Group' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); + + + + +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (97, '2019-04-02 18:07:31.319', '2019-04-02 18:07:31.319', 'NODE CPU 사용', '(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m])) * 100))', 
'Node CPU Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id }} CPU 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (1, '2019-04-15 02:26:13.826', '2019-04-15 02:26:24.02', 'NODE Disk 사용', '(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', {filter} }))) * 100', 'Node Disk Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Disk 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (119, '2019-04-02 18:08:50.17', '2019-04-02 18:08:50.17', 'NODE Memory 사용', '(1- ((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node''})) * 100', 'Node Memory Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Memory 사용률이 {threshold}%를 초과했습니다. 현재값 : {{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (2, '2019-04-15 05:27:56.544', '2019-04-15 05:27:59.924', 'Container CPU 사용', 'sum (rate (container_cpu_usage_seconds_total{ {filter} }[1m])) by (xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id) * 100', 'Container CPU Usage', 'controller', 'Cluster:{{$labels.xm_clst_id }} POD:{{$labels.xm_pod_id}} CPU 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_user','Container CPU User (%)','Container CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_working_set_bytes','Container Memory Working Set (GiB)','Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_working_set_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_io_seconds','Host io Disk seconds','Host disk io seconds','sum by (instance) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Disk IO Seconds:{{humanize $value}}|{threshold}.','2020-03-23 04:08:37.359','2020-03-23 04:08:37.359'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_write_byte','host disk R/W byte','host disk R/W byte','sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Read/Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2020-03-24 05:21:53.915','2020-03-24 05:24:52.674'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_free','Host Memory Free (GiB)','Memory information field MemFree_bytes','(node_memory_MemAvailable_bytes{{filter}} or (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:18.977','2020-03-23 04:08:18.977'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_sent','Number of Bytes Sent','The number of bytes sent to all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_sent[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Sent:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_namespace','Containe memory sum by namespace','Containe memory sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','memory','Namespace',NULL,false,false,'Container memory sum by namespace','2020-07-03 04:31:10.079','2020-07-03 08:38:17.034'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_count','Node Count','node count','count by(xm_clst_id, xm_namespace,xm_node_id) (up{{filter}})','Node','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} NODE:{{$labels.xm_node_id}} Node Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_restart_count','Container Restart Count','container restart count group by namespace','sum by(xm_clst_id, xm_namespace, pod_name ) (increase(imxc_kubernetes_container_restart_count{{filter}}[10s]))','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container Restart Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_usage','Node CPU Usage (%)','NODE CPU Usage','(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0) * 100)))','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency_device','Node Disk Read Latency per Device (ms)','Node Disk Read Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage_per_device','Node Filesystem Usage per device (%)','NODE Filesystem Usage per Device','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_memory_usage','Node Memory Usage (%)','Node Memory Usage','sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_tablespace_size','Tablespace Size (GiB)','Generic counter metric of tablespaces bytes in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, tablespace, type) (oracledb_tablespace_bytes) / 1073741824','Tablespace','OracleDB','tablespace, type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Tablespace Size:{{humanize $value}}GiB|{threshold}GiB.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_allocated_size','Allocated Memory (MiB)','The total amount of memory that the Redis allocator allocated','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_allocated_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Allocated Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_kubernetes_event_count','Cluster events count','Kubernetes Namespace Events count','sum by (xm_clst_id, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Event Count:{{humanize $value}}|{threshold}.','2019-09-26 05:33:37.000','2020-04-27 05:38:47.804'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_limit','cluster_memory_limit (Gib)','Total container limit size in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Limits:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_total_count','Cluster Pod Total Count','Cluster Pod Total Count','sum by (xm_clst_id) (imxc_kubernetes_controller_counts{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Total Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_free','Host Swap Memory Free','Host Swap Free','node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:24.594','2020-03-23 04:08:24.594'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_context_switch_count','Host Context','Total number of context switches.','sum by (instance) (node_context_switches_total{{filter}})','CPU','Host',NULL,false,false,'None','2020-03-23 04:08:15.000','2020-03-23 04:08:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_used','Host system Filesystem used','Host File system used','sum by (instance) (node_filesystem_size_bytes{{filter}}-node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:30.407','2020-03-23 04:08:30.407'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_io','Node Disk I/O','Total seconds spent doing I/Os','avg by (xm_clst_id, xm_node_id) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:55.992'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage','Container Filesystem Usage (%)','Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_reads','Container Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_namespace','Container cpu sum by namespace','Container cpu sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Namespace',NULL,false,false,'.','2020-05-30 08:30:10.158','2020-06-09 02:00:50.856'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size','Node Filesystem Available Size (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', 
{filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_running_count','Node Pod Running Count','Node Pod Running Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Running Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-06 08:02:40.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_user','Pod CPU User (%)','Pod CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_reads','Pod Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Read Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_max_usage_bytes','Pod Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_max_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Max Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_receive','Pod Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Receive:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hits_count','Total number of cache hits (count/s)','Total number of cache hits','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_hits_count{{filter}}[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Counts per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:24:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_clientrequest_failures_count','Number of transaction failures encountered','Number of transaction failures encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_failures_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Failure Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_connections_and_tasks','Cassandra connections & tasks','cassandra connections & tasks','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "data_type", "Active tasks", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "data_type", "Pending tasks", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "data_type", "Client connections", "", "") )','Connection','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Connections and Tasks:{{humanize $value}}|{threshold}.','2020-01-02 09:11:48.000','2020-02-13 01:24:51.522'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_transmit','Pod Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Transmit:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 
03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_request','cluster_memory_request (Gib)','Total container memory request in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_count','Local read count (count/s)','Local read count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_readlatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_cpu{{filter}})','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Cluster CPU Capacity Cores:{{humanize $value}}|{threshold}.','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_alerts_received_count','Cluster alerts received count','Alert count by cluster','sum by (xm_clst_id, level) (ceil(increase(imxc_alerts_received_count_total{status=''firing'', 
{filter}}[10m])))','Alert','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Alert Received Counts:{{humanize $value}}|{threshold}.','2019-08-23 04:41:49.000','2020-04-28 08:09:09.429'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_throttled_time','Container CPU Throttled Time','container cpu_throttled time','sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) (increase(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="", {filter}}[10s]))','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hitrate','All time cache hit rate','All time cache hit rate','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (cassandra_cache_hitrate {{filter}} * 100)','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-12-13 01:19:54.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_read_bytes','Bytes Read from All Instance Store Volumes (KiB)','Bytes read from all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_read_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_write_bytes','Bytes Written to All Instance Store Volumes (KiB)','Bytes written to all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_write_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebswrite_bytes','Bytes written to all EBS volumes (KiB)','Bytes written to all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebswrite_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_requests_count','Total number of cache requests (count/s)','Total number of cache requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_requests_count[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_keyspace_write_latency','Local write latency (ms)','Local write latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_usage','Cluster Memory Usage (%)','All Nodes Memory Usage in cluster.','(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100','Memory','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-07-18 06:12:22.000','2020-04-22 04:59:14.251'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections_metrics_created_total','Incoming Connections Created','Count of all incoming connections created to the server (This number includes connections that have since closed)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_connections_metrics_created_total[1m]))','Connection','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Incoming Connections Created Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_disk_io','MySQL Disk I/O','MySQL Disk I/O','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_data_read[1m]), "data_type", "read", "", "") or +label_replace(rate(mysql_global_status_innodb_data_written[1m]), "data_type", "written", "", ""))','Disk','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} SVC:{{$labels.xm_service_name}} Mysql Disk IO:{{humanize $value}}|{threshold}.','2019-12-05 08:48:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_capacity_count','Cluster Pod Capacity Count','Cluster Pod Capacity Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Capacity Pod Counts:{{humanize $value}}|{threshold}.','2019-08-27 04:45:52.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_kubernetes_event_count','Namespace events count','Kubernetes Namespace Events count','sum by (xm_clst_id, xm_namespace, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Events:{{humanize $value}}|{threshold}.','2019-09-24 06:42:09.000','2019-09-24 06:42:34.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_capacity_cores','node_cpu_capacity_cores','node_cpu_capacity_cores','imxc_kubernetes_node_resource_capacity_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_allocatable_cores','node_cpu_allocatable_cores','node_cpu_allocatable_cores','imxc_kubernetes_node_resource_allocatable_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_capacity_count','Node Pod Capacity Count','Node Pod Capacity Count','imxc_kubernetes_node_resource_capacity_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Capacity Count of Pods:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_allocatable','node_memory_allocatable (Gib)','imxc_kubernetes_node_resource_allocatable_memory in GiB','imxc_kubernetes_node_resource_allocatable_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_limit','node_memory_limit (Gib)','Total container memory limit for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 
1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_readwritelatency_seconds','Cassandra Read/Write Latency (ms)','Cassandra Read/Write Latency (ms)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) or (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Keyspace Readwritelatency Seconds:{{humanize $value}}ms|{threshold}ms.','2019-10-23 01:46:07.000','2019-11-05 09:03:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_usage','Cluster CPU Usage (%)','All Nodes CPU Usage in cluster.','(100 - (avg by (xm_clst_id)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0)) * 100))','CPU','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-07-18 05:54:39.000','2020-04-22 04:59:14.253'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_received','Number of Bytes Received','The number of bytes received from all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_received[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Received:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 
16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_request','node_memory_request (Gib)','Total container memory request in GiB for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_tasks','Number of tasks','Number of tasks','sum by (task_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "task_type", "active", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "task_type", "pending", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "task_type", "connected", "", "") )','Task','Cassandra','task_type',true,false,'Number of tasks','2019-10-24 01:34:25.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_latency_seconds','Local latency seconds','Local latency seconds','sum by(type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(cassandra_keyspace_readlatency_seconds{quantile=''0.99'', {filter}}, "type", "read", "", "") or +label_replace(cassandra_keyspace_writelatency_seconds{quantile=''0.99'', {filter}}, "type", "write", "", "")) * 1000','Disk','Cassandra',NULL,true,true,'Local latency seconds','2019-10-24 02:14:45.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_concurrency','Wait-Time - Concurrency','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_concurrency[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Concurrency:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_pendingtasks','Number of queued tasks queued up','Number of queued tasks queued up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_pendingtasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Active Task:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_ready_count','Cluster Pod Ready Count','Cluster Pod Ready Count','sum by (xm_clst_id) (imxc_kubernetes_controller_ready{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Ready Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_allocatable_count','Node Pod Allocatable Count','Node Pod Allocatable Count','imxc_kubernetes_node_resource_allocatable_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} 
Allocatable Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_conatiner_count','Container Type Sparselog Count','Container-type sparse log count by xm_clst_id, xm_namespace, xm_node_id, xm_pod_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_namespace, xm_node_id, xm_pod_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Pod",{filter}}[1m])))','SparseLog','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_connected','Number of Open Connections','The number of currently open connections','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_connected)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Open Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebsread_bytes','Bytes read from all EBS volumes (KiB)','Bytes read from all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebsread_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 
17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_cpu_usage','Namespace CPU Usage (%)','CPU Usage by namespace','sum by (xm_clst_id,xm_entity_type,xm_namespace) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'', {filter}}[1m])) * 100','CPU','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 01:06:05.000','2019-08-23 01:06:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_memory_usage','Namespace memory usage (Gib)','Memory usage by namespace in bytes / 1073741824','sum by (xm_clst_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'', {filter}}) / 1073741824','Memory','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 01:21:31.000','2019-08-23 01:21:31.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_free','Node Memory Free (GiB)','Memory information field MemFree_bytes / 1073741824','node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_cached','Node Swap Memory Cached (GiB)','Memory information field SwapCached_bytes / 
1073741824','node_memory_SwapCached_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Cached Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_active_size','Active Memory (MiB)','The total amount of active memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_active_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Active Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_up','MySQL Up Count','Whether the last scrape of metrics from MySQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_up)','Instance','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Up counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_up','Oracle DB Up Count','Whether the Oracle database server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_up)','Instance','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle DB Up Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_process_count','Process Count','Gauge metric with count of processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_process_count)','Process','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Process Count Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_locks_count','Number of Locks','Number of locks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, mode) (pg_locks_count)','Lock','PostgreSQL','datname,mode',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Lock Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_updated','Number of Rows Updated','Number of rows updated by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_updated[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Updated Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_deleted','Number of Rows Deleted','Number of rows deleted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_tup_deleted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Deleted Row counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_files','Number of Temporary Files Created','Number of temporary files created by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_files[1m]))','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load15','Node CPU Load 15m Average','Node CPU 15m load average','node_load15{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 15m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:27:39.000','2019-05-15 08:27:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_throttling','Node CPU Throttling','Number of times this cpu package has been throttled.','increase(node_cpu_package_throttles_total{xm_entity_type=''Node'',{filter}}[1m])','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Throttling Counts:{{humanize $value}}|{threshold}.','2019-05-15 08:29:24.000','2019-05-15 08:29:24.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_usage','Pod CPU Usage (%)','Pod CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_system','Pod CPU System (%)','Pod CPU Usage (System)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage_bytes','Pod Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Used Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_limit_bytes','Pod Filesystem Limit Bytes (GiB)','Number of 
bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Limit Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load5','Node CPU Load 5m Average','Node CPU 5m load average','node_load5{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 5m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:26:07.000','2019-05-15 08:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_client_connectednativeclients','Number of Client Connections','Number of clients connected to this nodes native protocol server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_client_connectednativeclients)','Connection','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-07 11:59:04.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_activetasks','Number of tasks being actively worked on','Number of tasks being actively worked on','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_activetasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize 
$value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cloudwatch_requests_count','API requests made to CloudWatch','API requests made to CloudWatch','sum by (xm_clst_id, namespace, action) (rate(cloudwatch_requests_total{{filter}}[10m]))','Request','AWS/Usage',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.namespace}} CloudWatch API Call Volume:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_out','Bytes Sent Out on All Network Interfaces (KiB)','The number of bytes sent out on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_out_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_in','Bytes Received on All Network Interfaces (KiB)','The number of bytes received on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_in_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_count','Namespace Pod Count','Pod count by namesapce','count (sum (container_last_seen{{filter}}) by (xm_clst_id, xm_namespace, xm_pod_id)) by (xm_clst_id, xm_namespace)','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Pod Counts:{{humanize $value}}|{threshold}.','2019-08-22 16:53:32.000','2019-08-23 01:06:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage','Node Filesystem Usage (%)','NODE Filesystem Usage','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_available','Node Memory Available (GiB)','Memory information field MemAvailable_bytes / 1073741824','node_memory_MemAvailable_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Avail Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_total','Node Memory Total (GiB)','Memory information field MemTotal_bytes 
/ 1073741824','node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive','Node Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:07:46.000','2019-05-31 17:45:22.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit','Node Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:09:05.000','2019-05-31 17:46:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_allocated_count','Cluster Pod Allocated Count','Cluster Pod Allocated Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_allocatable_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Allocated Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_desired_count','Cluster Pod Desired Count','Cluster pod desired count by controller','sum by (xm_clst_id) (imxc_kubernetes_controller_replicas{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Desired Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 02:26:55.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_commands_total','Number of Commands Executed','The number of times each XXX command has been executed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, command) (rate(mysql_global_status_commands_total[1m]) > 0)','Request','MySQL','command',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Commands Executed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-12 08:20:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_running','Number of Threads Running','The number of threads that are not sleeping','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_running)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_dbname_state','Count by dbname and state in pg','count by dbname and state in pg','sum by (xm_clst_id, xm_namespace, 
xm_node_id, instance, state) (pg_stat_activity_count)','Connection','PostgreSQL','state',true,false,'count by dbname and state in pg','2020-01-30 06:10:54.000','2020-01-31 11:33:41.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_alerts_received_count','Namespace alerts received count','Alert count by namespace','sum by (xm_clst_id, xm_namespace, level) (floor(increase(imxc_alerts_received_count_total{status=''firing'', {filter}}[10m])))','Alert','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Alert Count:{{humanize $value}}|{threshold}.','2019-08-23 04:43:29.000','2019-08-23 04:43:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_reads_count_device','Node Disk Reads Count per Device (IOPS)','Node Disk Reads Count per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_reads_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Reads Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency','Node Disk Read Latency (ms)','Node Disk Read Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-05-20 10:59:07.000','2019-05-31 17:46:54.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency_device','Node Disk Write Latency per Device (ms)','Node Disk Write Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes','Node Disk Write Bytes (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size_device','Node Filesystem Available Size per Device (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size_device','Node Filesystem Free Size per Device (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size_device','Node Filesystem Total Size per Device (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_free','Node Swap Memory Free (GiB)','Memory information field SwapFree_bytes / 1073741824','node_memory_SwapFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_total','Node Swap Memory Total (GiB)','Memory information field SwapTotal_bytes / 1073741824','node_memory_SwapTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_up','PostgreSQL Up Count','Whether the last scrape of metrics from PostgreSQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (pg_up)','Instance','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Instance Count:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests','Number of Writes to Buffer Pool','The number of writes done to the InnoDB buffer pool','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_write_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Writes to Buffer Pool Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests','Number of Logical Read Requests','The number of logical 
read requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_read_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Logical Read Requests Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_read','Amount of Data Read','The amount of data read since the server was started (in bytes)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_data_read[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Read Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_os_log_written','Number of Bytes Written to Redo Log','The number of bytes written to the InnoDB redo log files','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_os_log_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Written to Redo Log Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_written','Amount of Data Written','The amount of data written so far, in bytes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(mysql_global_status_innodb_data_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Written Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pod','Container Memory Request/Limits vs Used by Pod','container_memory_sum_by_pod','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,true,false,'Container memory sum by pod (limit, request, used)','2020-07-22 21:44:33.000','2020-07-22 21:44:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_cache_hit_ratio','Buffer Cache Hit Ratio','Buffer Cache Hit Ratio','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ( +(1 - increase(mysql_global_status_innodb_buffer_pool_reads [1h]) / increase(mysql_global_status_innodb_buffer_pool_read_requests [1h])) * 100)','Block','MySQL',NULL,true,false,'.','2019-12-05 07:47:50.000','2019-12-13 01:17:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_cluster','Container CPU Request/Limits vs Used by Cluster','Container cpu sum by cluster (capacity, limit, request, usage)','sum by(xm_clst_id, data_type) ( 
+label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} *0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})*0.001, "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})*0.001, "data_type", "request", "" , "") or +label_replace(sum by(xm_clst_id)(rate(container_cpu_usage_seconds_total{{filter}}[1m])), "data_type", "used", "" , ""))','CPU','Cluster',NULL,true,false,'Container cpu sum by cluster','2020-07-22 17:49:53.000','2020-07-22 17:49:53.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size','Node Filesystem Total Size (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size','Node Filesystem Free Size (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('container_cpu_sum_by_pod','Container CPU Request/Limits vs Used by Pod','Container cpu sum by pod (capacity, limit, request, usage)','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type)( +label_replace (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "", "") or +label_replace (imxc_kubernetes_container_resource_limit_cpu{{filter}}*0.001, "data_type", "limit", "", "") or +label_replace (imxc_kubernetes_container_resource_request_cpu{{filter}}*0.001, "data_type", "request", "", "") +)','CPU','Pod',NULL,true,false,'Container cpu sum by Pod','2020-07-22 21:37:45.000','2020-07-22 21:37:45.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_lockmode','Count_by_lockmode','Count by lockmode','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, mode) (pg_locks_count)','Lock','PostgreSQL','mode',true,false,'Count by lockmode','2020-01-30 07:06:13.000','2020-01-30 07:06:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits','Number of Row Locks ','The number of row locks currently being waited for by operations on InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_innodb_row_lock_current_waits)','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_capacity','cluster_memory_capacity 
(Gib)','imxc_kubernetes_node_resource_capacity_memory','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Capacity:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:46:58.000','2020-05-27 09:05:56.427'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_free','Host system Filesystem free','Host File system free','sum by (instance) (node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Free Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:29.025','2020-03-23 04:08:29.025'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total','Host system Filesystem total','Host File system total','sum by (instance) (node_filesystem_size_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Total Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:27.634','2020-03-23 04:08:27.634'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_used','Host Swap Memory Used','Host Swap Used','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Used Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:26.169','2020-03-23 04:08:26.169'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes_device','Node Disk Read Bytes per Device (KiB)','The total number 
of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes','Node Disk Read Bytes (KiB)','The total number of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_rollback','Number of Transactions Rolled Back','Number of transactions in this database that have been rolled back','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_xact_rollback[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Rollback Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_commit','Number of Transactions Committed','Number of transactions in this database that have been committed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_xact_commit[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Commit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_ops_total','Number of Rows Operated','The number of rows operated in InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, operation) (rate(mysql_global_status_innodb_row_ops_total[1m]))','Row','MySQL','operation',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Rows Operated Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_immediate','Number of Table Lock Immediate','The number of times that a request for a table lock could be granted immediately','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_immediate[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Immediate Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_count','Local range scan count (count/s)','Local range scan count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) 
(rate(cassandra_keyspace_rangelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_waited','Number of Table Lock Waited','The number of times that a request for a table lock could not be granted immediately and a wait was needed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_waited[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Waited Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_time','Time Spent Reading Data File Blocks (ms)','Time spent reading data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blk_read_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_write_time','Time Spent Writing Data File Blocks (ms)','Time spent writing data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_blk_write_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Write Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_read','Number of Disk Blocks Read','Number of disk blocks read in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_read[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_hit','Number of Block Cache Hit','Number of times disk blocks were found already in the buffer cache','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_hit[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Hit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_activity_count','Number of Client Connections','number of connections in this state','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, state) (pg_stat_activity_count{{filter}})','Connection','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Connection Counts:{{humanize 
$value}}|{threshold}.','2019-08-27 15:49:21.000','2019-11-18 04:16:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_fetched','Number of Rows Fetched','Number of rows fetched by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_fetched[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Fetched Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_inserted','Number of Rows Inserted','Number of rows inserted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_inserted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Inserted Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_latency','Local range scan latency (ms)','Local range scan latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_rangelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_size','Size used by commit log segments (KiB/s)','Current size, in bytes, used by all the commit log segments / 1024','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_totalcommitlogsize[1m]){{filter}}) / 1024','Log','Cassandra',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Volume:{{humanize $value}}KiB/s|{threshold}KiB/s.','2019-10-02 10:17:01.000','2019-11-05 08:07:03.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_messages','Number of commit log messages written (count/s)','Total number of commit log messages written','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_completedtasks[1m]))','Log','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Message per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_count','Number of client requests (count/s)','Number of client requests by request type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_latency_seconds_count{{filter}}[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Client Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:04:25.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_active','Node Memory Active (GiB)','Memory information field Active_bytes in GiB','node_memory_Active_bytes{xm_entity_type=''Node'', {filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Active Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_returned','Number of Rows Returned','Number of rows returned by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_returned[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Returned Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_write_count','Local write count (count/s)','Local write count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_writelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_cluster','Container Memory Request/Limits vs Used by Cluster','Container memory sum by cluster','sum by (xm_clst_id, 
data_type)( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity", "" , "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "", "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "", "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Cluster',NULL,true,false,'Container memory sum by cluster','2020-07-22 21:23:15.000','2020-07-22 21:23:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_capacity','node_memory_capacity (Gib)','node memory capacity in GiB','imxc_kubernetes_node_resource_capacity_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:46:58.000','2019-08-23 08:46:58.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_request_cores','cluster_cpu_request_cores','cluster_cpu_request_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_request_cores','node_cpu_request_cores','node_cpu_request_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_limit_cores','cluster_cpu_limit_cores','cluster_cpu_limit_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_limit_cores','node_cpu_limit_cores','node_cpu_limit_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_unavailables_count','Number of unavailable exceptions encountered','Number of unavailable exceptions encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_unavailables_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Unavailable Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_up','Cassandra Up Count','Whether the last scrape of metrics from Cassandra was able to connect to the server','count by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_bufferpool_size{{filter}})','Instance','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Instances:{{humanize 
$value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 17:01:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_up','MongoDB Up Count','The number of seconds that the current MongoDB process has been active','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_instance_uptime_seconds[1m]))','Instance','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Up Count Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_current_queue','Number of Operations Waiting','The number of operations that are currently queued and waiting for the read or write lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_current_queue)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Waiting Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_client','Number of Active Client','The number of the active client connections performing read or write operations','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_client)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Active Client Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_metrics_document_total','Number of Documents Processed','The total number of documents processed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_metrics_document_total[1m]))','Row','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Documents Processed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused','Total disk space used (GiB)','Total disk space used belonging to this keyspace / 1073741824','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_totaldiskspaceused {{filter}}) / 1073741824','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Disk Space:{{humanize $value}}GiB|{threshold}GiB.','2019-10-02 10:17:01.000','2019-11-07 01:14:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_latency','Local read latency (ms)','Local read latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_totalblockedtasks','Number of tasks that were blocked (count/s)','Number of tasks that were blocked due to queue saturation in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_totalblockedtasks_count[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Blocked Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_completedtasks','Number of tasks completed (count/s)','Number of tasks completed in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_completedtasks{{filter}}[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Pending Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-05 08:08:57.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_memory','Amount of Memory, in MebiByte','The amount of memory, in mebibyte (MiB), currently used by the database process','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_memory)','Memory','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Memory:{{humanize $value}}MiB|{threshold}MiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_resource_utilization','Resource Usage','Gauge metric with resource utilization','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) (oracledb_resource_current_utilization)','Resource','OracleDB','resource_name',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Resource Usage:{{humanize $value}}%|{threshold}%.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_timeouts_count','Number of timeouts encountered','Number of timeouts encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_timeouts_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Timeout Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_network_bytes_total','Amount of Network Traffic','The number of bytes that reflects the amount of network traffic','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_network_bytes_total[1m]))','Network','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Network Traffic Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_op_counters_total','Number 
of Operations','The total number of operations since the mongod instance last started','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (rate(mongodb_op_counters_total[1m]))','Request','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits','Number of Waits for Row Locks','The number of times operations on InnoDB tables had to wait for a row lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_row_lock_waits[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Waits for Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_execute_count','Execute Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_execute_count[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Execute Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_commits','User Commits','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(oracledb_activity_user_commits[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_parse_count','Parse Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_parse_count_total[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Parse Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_rollbacks','User Rollbacks','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_user_rollbacks[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Rollback:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_writes','Pod Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 
05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage','Pod Memory Usage (%)','Pod Memory Usage Compared to Limit','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024)','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Utilization:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage_bytes','Pod Memory Used (GiB)','Current memory usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_cache_hit_ratio','Buffer Cache Hit Ratio (%)','Number of Block Cache Hit / (Number of Block Cache Hit & Blocks Reads) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (increase(pg_stat_database_blks_hit[1h]) / (increase(pg_stat_database_blks_read[1h]) + increase(pg_stat_database_blks_hit[1h])) * 100)','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL 
Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-08-27 15:49:21.000','2019-12-13 01:33:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_other','Wait-Time - Other','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_other[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Other:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_configuration','Wait-Time - Configuration','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_configuration[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Configuration:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_commit','Wait-Time - Commit','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_commit[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_scheduler','Wait-Time - Scheduler','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_scheduler[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Scheduler:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_system_io','Wait-Time - System I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_system_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - System I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_user_io','Wait-Time - User I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_user_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - User I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_network','Wait-Time - Network','Generic counter metric from 
v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_network[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Network:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_blocked_clients','Blocked Clients','Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_blocked_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Blocked Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connected_clients','Connected Clients','Number of client connections (excluding connections from replicas)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_connected_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Connected Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connections_received','Received Connections','Total number of connections accepted by the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_connections_received_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Received Connections:{{humanize 
$value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_rejected_connections','Rejected Connections','Number of connections rejected because of maxclients limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_rejected_connections_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Rejected Connections:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_up','Redis Up Count','Whether the Redis server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_up)','Instance','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Up Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_total','Call Count / Command','Total number of calls per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_total[1m]))','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Call Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_processed','Processed Commands','Total number of commands processed by the server','sum by (xm_clst_id, 
xm_namespace, xm_node_id, instance) (rate(redis_commands_processed_total[1m]))','Request','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Processed Commands:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_key_hit_raito','Redis key hit ratio','redis key hit ratio','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_keyspace_hits_total [1m]), "data_type", "hits", "" , "") or +label_replace(rate(redis_keyspace_misses_total [1m]), "data_type", "misses", "" , "") )','Keyspace','Redis','data_type',true,false,'redis key hit ratio','2020-01-29 02:28:03.000','2020-02-13 00:46:27.568'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_net_byte_total','Network byte','Network byte','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_net_input_bytes_total [1m]), "data_type", "input", "", "") or +label_replace(rate(redis_net_output_bytes_total [1m]), "data_type", "output", "", ""))','Network','PostgreSQL','data_type',true,false,'Network byte','2020-01-30 07:22:12.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_cache','Pod Memory Cache (GiB)','Number of bytes of page cache memory / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_cache{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Cache Memory:{{humanize 
$value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_swap','Pod Memory Swap (GiB)','Pod swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_swap{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_total','Oracledb wait time total','oracledb wait time total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_wait_time_scheduler[1m]), "data_type", "scheduler", "", "") or +label_replace(rate(oracledb_wait_time_commit[1m]), "data_type", "commit", "", "") or +label_replace(rate(oracledb_wait_time_network[1m]), "data_type", "network", "", "") or +label_replace(rate(oracledb_wait_time_concurrency[1m]), "data_type", "concurrency", "", "") or +label_replace(rate(oracledb_wait_time_configuration[1m]), "data_type", "configuration", "", "") or +label_replace(rate(oracledb_wait_time_user_io[1m]), "data_type", "user_io", "", "") or +label_replace(rate(oracledb_wait_time_system_io[1m]), "data_type", "system_io", "", "") or +label_replace(rate(oracledb_wait_time_other[1m]), "data_type", "other", "", ""))','Wait','OracleDB','data_type',true,false,'oracledb wait time total','2020-01-29 11:03:20.000','2020-02-13 01:08:01.629'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('oracledb_activity_count','Oracledb activity count','oracledb activity count','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_execute_count [1m]), "data_type", "excutecount", "", "") or +label_replace(rate(oracledb_activity_parse_count_total[1m]), "data_type", "parse_count", "", "") )','Request','OracleDB','data_type',true,false,'oracledb activity count','2020-01-29 10:40:58.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_transaction','Oracledb transaction','oracledb transaction','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_user_rollbacks[1m]), "data_type", "rollbacks", "", "") or +label_replace(rate(oracledb_activity_user_commits[1m]), "data_type", "commits", "", ""))','Request','OracleDB','data_type',true,false,'oracledb transaction','2020-01-29 11:20:47.000','2020-02-13 01:26:28.558'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_cpu_usage','Redis cpu usage','redis cpu usage','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_used_cpu_sys [1m]), "data_type", "system", "", "") or +label_replace(rate(redis_used_cpu_user [1m]), "data_type", "user", "", "") )','CPU','Redis','data_type',true,false,'redis cpu usage','2020-01-29 01:56:58.000','2020-02-12 04:47:21.228'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_total_load','host total load','host total load','sum by (instance, data_type) ( +label_replace(node_load1 {{filter}}, "data_type", "load 1", "", "") or +label_replace(node_load5 
{{filter}}, "data_type", "load 5", "", "") or +label_replace(node_load15 {{filter}}, "data_type", "load15", "", "") )','CPU','Host',NULL,false,false,'host total load','2020-04-01 08:10:26.588','2020-04-03 01:23:47.665'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys_children','System CPU Used Background','System CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_hits','Keyspace Hits','Number of successful lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_hits_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Hits:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_misses','Keyspace Misses','Number of failed lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_misses_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Misses:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys','DB Keys Count','Total number of keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB Keys Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_expired_keys','Expired Keys','Total number of key expiration events','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_expired_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Expired Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_evicted_keys','Evicted Keys','Number of evicted keys due to maxmemory limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_evicted_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Evicted Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys_expiring','DB Keys Count Expiring','Total number of expiring keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys_expiring)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB 
Keys Count Expiring:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_duration_seconds','Duration Seconds / Command','Total duration seconds per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_duration_seconds_total[1m]) * 1000)','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Duration Seconds:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-29 01:42:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_total','Redis memory total','redis memory total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(redis_allocator_active_bytes / 1048576, "data_type", "active", "" , "") or +label_replace(redis_memory_used_bytes / 1048576, "data_type", "used", "" , "") or +label_replace(redis_allocator_allocated_bytes / 1048576, "data_type", "allocated", "" , "") or +label_replace(redis_allocator_resident_bytes / 1048576, "data_type", "resident", "" , "") )','Memory','Redis','data_type',true,false,'redis memory total','2020-01-29 02:08:28.000','2020-02-13 00:45:28.475'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('count_by_connection_type','Count by connection type','count by connection type','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_connections_received_total [1m]), "data_type", "received connections", "", "") or +label_replace(rate(redis_rejected_connections_total [1m]), "data_type", "rejected 
connections", "", "") or +label_replace(redis_connected_clients, "data_type", "connected clients", "", "") or +label_replace(redis_blocked_clients, "data_type", "blocked clients", "", "") )','Connection','Redis','data_type',true,false,'count by connection type','2020-01-29 00:49:09.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_count','Number of row by stat','Number of row by stat','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_tup_deleted[1m]), "data_type", "deleted", "", "") or +label_replace(rate(pg_stat_database_tup_updated[1m]), "data_type", "updated", "", "") or +label_replace(rate(pg_stat_database_tup_inserted[1m]), "data_type", "inserted", "", "") or +label_replace(rate(pg_stat_database_tup_returned[1m]), "data_type", "returned", "", "") or +label_replace(rate(pg_stat_database_tup_fetched[1m]), "data_type", "fetched", "", "") )','Row','PostgreSQL','data_type',true,true,'Number of row by stat','2019-10-28 07:29:26.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_write_time','Read/Write spent time by file blocks','Read/Write spent time by file blocks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_blk_read_time [1m]), "data_type", "read", "", "") or +label_replace(rate(pg_stat_database_blk_write_time [1m]), "data_type", "write", "", ""))','Block','PostgreSQL','data_type',true,false,'Read/Write spent time by file blocks','2019-10-28 10:56:48.000','2020-02-13 01:06:46.680'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_resident_size','Resident Memory (MiB)','The total amount of resident memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_resident_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Resident Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_used_size','Used Memory (MiB)','Total number of bytes allocated by Redis using its allocator','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_memory_used_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Used Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_anormal_count','Number of anormal request','Number of anormal request ','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, anormal_type) +(label_replace(rate(cassandra_clientrequest_unavailables_count[1m]), "anormal_type", "unavailables", "", "") or +label_replace(rate(cassandra_clientrequest_timeouts_count[1m]), "anormal_type", "timeouts", "", "") or +label_replace(rate(cassandra_clientrequest_failures_count[1m]), "anormal_type", "failures", "", ""))','Request','Cassandra','anomal_type',true,false,'Number of anormal request ','2019-10-28 02:09:45.000','2020-02-13 01:16:24.862'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog','Commitlog count and size','Commitlog count and size','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(rate(cassandra_commitlog_completedtasks {{filter}}[1m]), "data_type", "log_count", "", "") or +label_replace(rate(cassandra_commitlog_totalcommitlogsize {{filter}}[1m]) / 1048576, "data_type", "log_size", "", ""))','Log','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-24 10:44:47.000','2020-02-13 01:16:24.864'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_threads_total','Number of Threads','Number of Threads','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_threads_running, "data_type", "active", "", "") or +label_replace(mysql_global_status_threads_connected, "data_type", "connected", "", "") or +label_replace(rate(mysql_global_status_connections [1m]), "data_type", "connection attempts[1m]", "", "") )','Thread','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-05 06:04:21.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_read_write_count','Local read write count','Local read write count','sum by(xm_clst_id, xm_namespace, xm_node_id, instance, type) +(label_replace( rate(cassandra_keyspace_readlatency_seconds_count [1m]), "type", "read", "", "") or +label_replace( 
rate(cassandra_keyspace_writelatency_seconds_count [1m]), "type", "write", "", ""))','Disk','Cassandra','type',true,true,'Local read write count','2019-10-24 05:18:50.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_lock_total','Oracledb lock total','oracledb lock total','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) +(oracledb_resource_current_utilization{resource_name =~''.+_locks''})','Resource','OracleDB','resource_name',true,false,'oracledb lock total','2020-01-29 11:17:01.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_per_sec_by_api','Service HTTP Requests Count by API (per Second)','the number of HTTP requests counts per second by API','(sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value)','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_core_count','Host CPU Core Count','Host_cpu_capacity_cores','count without(cpu, mode) (node_cpu_seconds_total{{filter}})','CPU','Host',NULL,true,false,'None','2020-03-23 
04:08:05.290','2020-03-23 04:08:05.290'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load5','Host CPU Load 5m Average','Host CPU 5m load average','node_load5{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 5m Load Average:{{humanize $value}}%|{threshold}$.','2020-03-23 04:08:11.655','2020-03-23 04:08:11.655'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_cluster','Pod Phase Count by Cluster','pod phase count by cluster','count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))','Cluster','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_network_io_byte','host network io byte','host network io byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )','Network','Host',NULL,false,false,'host network io byte','2020-03-24 05:48:31.359','2020-03-24 05:48:31.359'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_contextswitch_and_filedescriptor','host contextswitch and filedescriptor','host 
contextswitch and filedescriptor','sum by (data_type, instance) ( +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "Context switch", "", "") or +label_replace(node_filefd_allocated {{filter}}, "data_type", "File descriptor", "", "") )','OS','Host',NULL,false,false,'host contextswitch and filedescriptor','2020-03-24 09:05:51.828','2020-03-24 09:08:06.867'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_usage','Host Swap Memory Usage (%)','Host Swap Memory Usage','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}} / node_memory_SwapTotal_bytes{{filter}} +','Memory','Host',NULL,true,false,'None','2020-03-26 06:39:21.333','2020-03-26 06:39:21.333'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_boot_time','Host Boot time','Host Boot time','node_boot_time_seconds{{filter}}','CPU','Host',NULL,true,false,'None','2020-03-26 08:03:46.189','2020-03-26 08:03:46.189'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_latency','Host read Disk latency','Host disk read latency','sum by (instance) (rate(node_disk_reads_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_read_time_seconds_total{{filter}}[1m])/rate(node_disk_reads_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Read Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:34.001','2020-03-23 04:08:34.001'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('host_disk_write_latency','Host write Disk latency','Host disk write latency','sum by (instance) (rate(node_disk_writes_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_write_time_seconds_total{{filter}}[1m])/rate(node_disk_writes_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Write Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:35.823','2020-03-23 04:08:35.823'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_usage','Host Memory Usage (%)','Host Memory Usage ','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Usage:{{humanize $value}}%|{threshold}%.','2020-03-26 06:36:47.931','2020-03-26 06:36:47.931'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_total','Host Memory Total (GiB)','Memory information field MemTotal_bytes','node_memory_MemTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:16.897','2020-03-23 04:08:16.897'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_bytes_received_sent','Bytes Received & Sent in MySQL','Bytes Received & Sent in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( 
+label_replace(rate(mysql_global_status_bytes_received [1m]), "data_type", "received", "", "") or +label_replace(rate(mysql_global_status_bytes_sent [1m]), "data_type", "sent", "", ""))','Network','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}|{threshold}.','2019-12-05 07:58:11.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_95th','Service HTTP 95% Elapsed Time (ms)','the maximum time taken to servce the 95% of HTTP requests','histogram_quantile(0.95, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 95th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_99th','Service HTTP 99% Elapsed Time (ms)','the maximum time taken to servce the 99% of HTTP requests','histogram_quantile(0.99, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 99th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_error_rate','Service Pod HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Pod Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-11-07 07:52:24.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_90th','Service HTTP 90% Elapsed Time (ms)','the maximum time taken to servce the 90% of HTTP requests','histogram_quantile(0.90, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 90th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total_by_mountpoint','host filesystem size by mountpoint','host filesystem size by mountpoint','sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))','Filesystem','Host',NULL,false,false,'host filesystem size by mountpoint','2020-03-30 04:01:45.322','2020-03-30 05:16:32.252'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_timeline_count','Namespace timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id, xm_namespace, level)','Timeline','Namespace',NULL,false,false,'None','2020-04-08 06:21:21.392','2020-04-08 06:21:21.392'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_timeline_count','Cluster timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id,level)','Timeline','Cluster',NULL,false,false,'None','2020-04-08 06:19:32.792','2020-04-28 
08:07:47.786'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_transmit','Cluster Network Transmit','Cluster Network Transmit','sum by (xm_clst_id) (rate(node_network_transmit_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Transmit','2020-04-28 08:10:21.070','2020-04-28 08:29:18.491'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_receive','Cluster Network Receive','Cluster Network Receive','sum by (xm_clst_id) (rate(node_network_receive_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Receive','2020-04-28 08:07:26.294','2020-04-28 08:29:18.486'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_running_count','Namespace Pod Running Count','Running pod count by namespace','count by (xm_clst_id, xm_namespace) (sum by (xm_clst_id, xm_node_id, xm_namespace, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Namespace',NULL,false,false,'None','2020-05-21 01:18:06.016','2020-05-21 01:18:06.016'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_request','Pod CPU Request','Pod CPU Request','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_io_byte','Node Network IO byte','Node Network IO byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", "") )','Network','Node',NULL,false,false,'Node Network IO byte','2020-05-21 07:32:03.535','2020-05-21 07:32:03.535'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_request','pod_memory_request (Gib)','Total container memory request in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_node','Container memory sum by node','Container memory sum by node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity" , "", "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_working_set_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Node',NULL,false,false,'Container memory sum by node','2020-05-28 09:36:44.000','2020-06-09 01:38:10.694'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_context_switches','Node Context Switches','Node Context Switches','rate(node_context_switches_total {{filter}}[1m])','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:05.521'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_write_byte','Node disk read and write bytes','Node disk read and write bytes','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]), "data_type", "Read" , "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]), "data_type", "Write", "" , "") +)','Disk','Node',NULL,false,false,'Node disk read and write bytes','2020-05-28 13:02:44.729','2020-05-28 13:04:35.126'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_total','Host Swap Memory Total','Host Swap Total','node_memory_SwapTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Swap Memory Size:{{humanize 
$value}}GiB|{threshold}GiB.','2020-03-23 04:08:23.130','2020-03-23 04:08:23.130'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_iowait','Host CPU iowait','Host CPU iowait','avg by (instance) (rate(node_cpu_seconds_total{mode=''iowait'',{filter}}[1m])) * 100','CPU','Host',NULL,false,false,'Host:{{$labels.instance}} CPU IO wait:{{humanize $value}}|{threshold}.','2020-03-26 08:03:51.307','2020-03-26 08:03:51.307'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_filefd_allocated','Host statistics Filesystem allocated.','Host File descriptor statistics: allocated.','sum by (instance) (node_filefd_allocated{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem allocated:{{humanize $value}}|{threshold}.','2020-03-23 04:08:31.970','2020-03-23 04:08:31.970'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg','Service HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests','sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) == 0 or +sum (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) +/ sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace)','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Requests Time Avg:{{humanize 
$value}}ms|{threshold}ms.','2019-10-15 09:37:44.000','2020-03-09 06:42:14.172'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate_by_api','Service HTTP Requests Error Rate by API','the number of HTTP error counts by API / the number of HTTP requests counts by API','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) ==0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg_by_api','Service HTTP Average Elapsed Time by API (ms)','the average time taken to serve the HTTP requests by API for a service','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.500'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_used','Node CPU Used (Cores)','Node CPU Used (Cores)','(100 - (avg by (xm_clst_id, xm_node_id) (clamp_max(rate(node_cpu_seconds_total{name="node-exporter", mode="idle", xm_entity_type="Node", {filter}}[1m]),1.0)) * 100)) * sum by(xm_clst_id, xm_node_id)(imxc_kubernetes_node_resource_capacity_cpu{{filter}}) / 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:35.939'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_iowait','Node CPU I/O Wait','Node CPU I/O Wait','avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{name="node-exporter", mode="iowait", xm_entity_type="Node" , {filter}}[1m])) * 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:20.633'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_node','Container cpu sum by Node','Container cpu sum by Node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} * 0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001), "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001), "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Node',NULL,false,false,'Container cpu sum by Node','2020-05-28 08:06:35.736','2020-06-09 01:46:12.446'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops_per_device','Node Disk IOPs per device','Node Disk I/O Operations Per Second (per device)','sum by (xm_clst_id, xm_node_id, device) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node','device',false,false,'None','2020-06-10 05:56:05.311','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops','Node Disk IOPs','Node Disk I/O Operations Per Second','sum by (xm_clst_id, xm_node_id) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-06-10 05:54:01.309','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_iops','Host Disk IOPs','Host Disk IOPs','sum by (instance) ((rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or (rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))','Disk','Node',NULL,false,false,'Host Disk IOPs','2020-06-10 07:26:28.895','2020-06-10 07:26:28.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_limit','Pod CPU Limit','Pod CPU Limit','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_limit','pod_memory_limit (Gib)','Total container memory limit in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage_bytes','Container Memory Used (GiB)','Current memory usage in GiB, this includes all memory regardless of when it was accessed','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_used','Node Memory Used (GIB)','Node Memory Used (GIB)','((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Used:{{humanize $value}}GiB|{threshold}GiB.','2020-05-21 01:18:06.000','2020-06-04 11:11:11.000'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user','User CPU Used','User CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user[1m]))','CPU','Redis',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-05-29 09:37:22.273'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_container','Container cpu sum by container','container cpu sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_request_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{xm_cont_name!=''POD'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_pods','Container cpu sum by pod','Container cpu sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , 
""))','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pods','Container memory sum by pod','Container memory sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_container','Container memory sum by container','Container memory sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_limit_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{xm_cont_name!=''POD'',{filter}}, "data_type", "used", "" , ""))','Memory','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_disk_read_write_byte','Container disk read and write bytes','Container disk read and write bytes','sum by(xm_clst_id, xm_pod_id, xm_cont_name, data_type) 
(label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_disk_read_write_byte','Pod disk read and write bytes','Pod disk read and write bytes','sum by(xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_io_byte','Container Network IO byte','Container Network IO byte','sum by (xm_clst_id, xm_pod_id, xm_cont_name, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_io_byte','Pod Network IO byte','Pod Network IO byte','sum by (xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", 
"", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load1','Node CPU Load 1m Average','Node CPU 1m load average','node_load1{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 1m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:22:49.000','2019-05-15 08:22:49.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_open_file_descriptor','Node File Descriptor','Node File Descriptor','sum by(xm_clst_id, xm_node_id)(node_filefd_allocated {{filter}})','Filesystem','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} File Descriptor:{{humanize $value}}|{threshold}.','2020-05-21 01:18:06.000','2020-05-29 09:37:51.101'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_node_count','Node Type Sparselog Count','Node-type sparse log count by xm_clst_id, xm_node_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_node_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Node",{filter}}[1m])))','SparseLog','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_cache','Container Memory Cache (GiB)','Number of bytes of page cache memory / 
1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_cache{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load15','Host CPU Load 15m Average','Host CPU 15m load average','node_load15{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 15m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:13.337','2020-03-23 04:08:13.337'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes_device','Node Disk Write Bytes per Device (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency','Node Disk Write Latency (ms)','Node Disk Write Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Latency:{{humanize 
$value}}ms|{threshold}ms.','2019-05-20 11:00:56.000','2019-05-31 17:47:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_writes_count_device','Node Disk Writes Count per Device (IOPS)','Node Disk Writes Counts per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_writes_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Writes Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_throttled_rate','Container CPU Throttled Rate','container throttled rate','sum by(xm_clst_id, xm_cont_id) (rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="",{filter}}[1m]))','Cluster','Container',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_total_count','Node Pod Total Count','Node Pod Total Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_service_http_requests_per_sec','Service HTTP Requests Count (per Second)','the number of HTTP requests counts per second','((sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))/ on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Http Requests/Second:{{humanize $value}}|{threshold}.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_per_sec','Service Pod HTTP Requests Count (per Second)','the number of HTTP requets counts per second for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod Http Requests/Seconds:{{humanize $value}}|{threshold}.','2019-11-07 07:51:11.000','2020-03-09 06:34:19.353'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_max_usage_bytes','Container Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_max_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 
1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_receive','Container Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_50th','Service HTTP 50% Elapsed Time (ms)','the maximum time taken to servce the 50% of HTTP requests','histogram_quantile(0.50, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 50th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_errors_count','Service Error Count','service error 
count','sum by(xm_clst_id, xm_namespace, xm_service_name, statuscode ) (imxc_service_errors_count{statuscode!="200",{filter}}) OR on() vector(0)','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Error Count:{{humanize $value}}|{threshold}.','2020-08-21 16:45:00.000','2020-08-21 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_used','Host Memory Used (GiB)','Memory information field MemUsed_bytes','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:21.399','2020-03-23 04:08:21.399'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_all_state','Workload Count All State','workload total count regardless of pod state','count by(xm_clst_id, controller_kind) (imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_running_pod','Workload Count Running Pod','workload count of Running state pod','sum by(xm_clst_id,controller_kind ) 
(imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit_device','Node Network Transmit per Device(KiB)','Network device statistic transmit_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive_device','Node Network Receive per Device(KiB)','Network device statistic receive_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_time_avg','Service Pod HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests for pod','sum by 
(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod http Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2019-11-07 07:51:46.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_system','Container CPU System (%)','Container CPU Usage (System)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_usage','Container CPU Usage (%)','Container CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-05-15 
01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_namespace','Pod Phase Count by Namespace','pod phase count by cluster, namespace','count by(xm_clst_id, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','Namespace','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} Pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_limit_bytes','Container Filesystem Limit Bytes (GiB)','Number of bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage','Container Memory Usage (%)','Container memory usage compared to limit if limit is non-zero or 1GiB if limit is zero','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'', xm_cont_name!=''POD'', {filter}} / (container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} > 0) * 100) or sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) 
(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024 / 1024 / 1024 * 100)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_swap','Container Memory Swap (GiB)','Container swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_swap{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_transmit','Container Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('controller_pod_count','Controller Pod Count','Controller Pod Count','sum (imxc_kubernetes_controller_counts{{filter}}) by (xm_clst_id, xm_namespace, xm_entity_name, 
xm_entity_type)','Pod','Controller',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Controller Pod Counts:{{humanize $value}}|{threshold}.','2019-10-10 06:39:09.000','2019-10-10 06:39:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load1','Host CPU Load 1m Average','Host CPU 1m load average','node_load1{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 1m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:09.946','2020-03-23 04:08:09.946'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_usage','Host CPU Usage (%)','Host CPU Usage','100 - (avg by (instance)(clamp_max(rate(node_cpu_seconds_total{mode=''idle'',{filter}}[1m]),1.0)) * 100)','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:07.606','2020-03-23 04:08:07.606'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_cpuutilization','The percentage of allocated EC2 compute','The percentage of allocated EC2 compute units that are currently in use on the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_cpuutilization_average{{filter}})','CPU','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections','Number of Incoming Connections','The number of incoming 
connections from clients to the database server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (mongodb_connections{{filter}})','Connection','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Incoming Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-13 02:26:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_buffer_io','Block read / write','mysql buffer I/O summary','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_innodb_buffer_pool_write_requests, "data_type", "write", "", "") or +label_replace(mysql_global_status_innodb_buffer_pool_read_requests, "data_type", "read", "", "") )','Block','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Mysql Buffer IO:{{humanize $value}}|{threshold}.','2019-12-05 07:30:33.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_reads','Number of Reads Directly from Disk','The number of logical reads that InnoDB could not satisfy from the buffer pool, and had to read directly from disk','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_reads[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Reads Directly from Disk Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('mysql_global_status_connections','Number of Connection Attempts','The number of connection attempts (successful or not) to the MySQL server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_connections[1m]))','Connection','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Connection Attempts counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_status_locks','Number of Locks in MySQL','Number of Locks in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_row_lock_current_waits[1m]), "data_type", "rowlocks", "", "") or +label_replace(rate(mysql_global_status_innodb_row_lock_waits[1m]), "data_type", "waits for rowlocks", "", "") or +label_replace(rate(mysql_global_status_table_locks_immediate[1m]), "data_type", "tablelock immediate", "", "") or +label_replace(rate(mysql_global_status_table_locks_waited[1m]), "data_type", "tablelock waited", "", "") )','Lock','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Mysql Status Locks:{{humanize $value}}|{threshold}.','2019-12-05 08:39:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage_bytes','Container Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_writes','Container Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_sessions_value','Session Count','Gauge metric with count of sessions by status and type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, status_type) +(label_join(oracledb_sessions_value, "status_type", "-", "status", "type"))','Session','OracleDB','status_type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Session Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_bytes','Bytes Written to Temporary Files (KiB)','Total amount of data written to temporary files by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_bytes[1m])) / 1024','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File 
Write Size:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys','System CPU Used','System CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user_children','User CPU Used Background','User CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate','Service HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts','sum by(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / sum by +(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) 
(rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_cache_hit_ratio','Buffer Cache Hit Ratio (%)','(Number of Logical Read - Number of Reads Directly from Disk) / (Number of Logical Read) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ((increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) - increase(mysql_global_status_innodb_buffer_pool_reads[1m])) / increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) * 100)','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Buffer Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage','Pod Filesystem Usage (%)','Pod File System Usage: 100 * (Used Bytes / Limit Bytes)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} /((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_pod_cpu_request','Node Pod CPU Request','Node Pod CPU Request','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} Pod CPU Requests:{{humanize $value}}|{threshold}.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_cpu_usage','Node Pod CPU Usage (%)','Node Pod CPU Usage','sum by (xm_clst_id,xm_node_id) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod CPU Usage:{{humanize $value}}%|{threshold}%.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_usage_core','Container CPU Usage (Core)','Container CPU Usage (Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_system_core','Container CPU System (Core)','Container CPU Usage (System)(Core)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_user_core','Container CPU User (Core)','Container CPU Usage 
(User)(Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_service','pod info in service','pod info(state, node) in service','sum by (xm_clst_id, xm_namespace, xm_service_name,xm_node_id,node_status,xm_pod_id,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2020-12-22 16:05:00.000','2020-12-22 16:05:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_state','Service State Count Sum','service state sum by xm_service_name','sum by (xm_service_name,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2021-01-06 17:30:00.000','2021-01-06 17:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_workload_state','Workload State Count Sum','wokload state sum by owner_name','count by (owner_name, pod_state) (imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_workload','Pod info by workload type','pod info(state, node) by workload type (do filter param)','count by (xm_clst_id, xm_namespace, owner_name, xm_node_id, node_status, xm_pod_id, pod_state) 
(imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_up_state','Node State metric','Node State metric for up, down check','imxc_kubernetes_node_ready{{filter}}','State','Node',NULL,true,false,'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Down {threshold}.','2020-02-02 14:30:00.000','2020-02-02 14:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_reads_by_workload', 
'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, 
xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100))', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name,xm_entity_type) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by (xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 
'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by(xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +--Number of Pods not running +INSERT INTO public.metric_meta2 VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()); +--Number of Containers not running +INSERT INTO public.metric_meta2 VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", 
{filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()); +-- Containers Restart count +INSERT INTO public.metric_meta2 VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()); + +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_per_sec','Service Transaction Count (per Second)','Service Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Transaction Count (per Second)','2021-11-15 16:11:19.606','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_elapsed_time_avg','Service Pod Transaction Elapsed Time (avg)','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Average Elapsed Time','2021-11-15 16:09:34.233','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_jspd_txn_error_rate','Service Transaction Error Rate','Service Transaction Error Rate','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2022-02-15 14:33:00.118000','2022-02-15 15:40:17.640000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_per_sec','Service Pod Transaction Count (per sec)','The number of transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-02-15 17:59:39.450000','2022-02-15 17:59:39.450000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_elapsed_time_avg','Service Average Elapsed Time','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))','Request','Service',null,true,true,'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2021-11-15 16:09:34.233000','2021-11-15 16:12:21.335000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_error_count','Service Transaction Error Count','Service Transaction Error Count','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])))','Request','Service',NULL,true,true,'Service Transaction Error Count','2021-11-15 16:10:31.352','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_error_rate','Service Pod Transaction Error Rate','The number of transaction error rate for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.','2022-02-15 18:08:58.180000','2022-02-15 18:08:58.180000'); + +INSERT INTO 
metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_active_txn_per_sec','Service Active Transaction Count (per Second)','Service Active Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:51:45.946','2022-03-11 15:51:45.946'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_active_txn_per_sec','Service Pod Active Transaction Count (per sec)','The number of active transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:53:29.252','2022-03-11 15:53:29.252'); + + +INSERT INTO public.license_key (id, license_key, set_time, in_used, tenant_id) VALUES (nextval('hibernate_sequence'), 
'A46CB0A0870B60DD0EF554F092FB8490C647C4ACCF17177EB0028FEF1B677A1DC86C08219D3D357E55E87B653A9D2F044F9095576ED493CE5D1E180E8843A04BCFE94E500F85491D408CFC7397B82F00063415F4CF8756545B6ED1A38F07F91A7B6D9381B7FC433A5086CDD2D748527ECB42835677199F23F7C8E33A66E8138182DDD76BE4925FA4B1DFD96FD5578FE80C75E0E20D76877BF6FD570265D8E69CAC34795B982CF8D811669894886567E4F5F62E28990953401374B548787E35374BFF201D5C9AD062B326E72F9B1D7791A610DA1BDF1D4F829819BC537E06C8D54F95FB04F2DAC456698F605DE3BBD72E472FC79658C806B188988B053E1E4D96FFFFFF0312983D630FAD5E9160650653074248047030124045265319328119048121312221292096178141356403289033057286071001044254168244430392446457353385472238471183338511051434316333006127241420429465082200161165099271484261287306170426201314452131350327249112310323036187433166345114324280269098441154231174135226128298344425341164290424093450115453299282209144110060155055496368233391148510223372355438125122460232315097083390283180026090507303464176016343147301028053052418046214169100404193398101492126437150008449359062078276386196105011194373118107003376243188284337378334352432479501211364186021040035210237120336302073022394079272002081397132067383497202300181309396185361017436058208454167203412219275329234043427354024133409339470296204490485256467335056F5B2CABD122B376DAEA67944E1CCE6867DF9EB6504C78F817DF9EB6504C78F81BF1E615E6EC6242C9667BD675FC5FA39C6672FE2068E5D1431C6CD04429D07655865E293C1F77ED7A0D33F5556DA6CD3A8EC2774DB04F797CE4A29B0312F75E585D51D7B4DD227EA6BD5278CB9233040E7DD2B30A6D5119959D5B7EAC826D3DA0537EFB5A034A6A1C91A619F4E168F46A455B594C91F058E1E22C7EA2957EED7533D069C335C95B4FA2B53E71A800343EA7F16B05AFBA04635F1FBDE9C81709C27BA075C78FA26311ED3A4A5226EF47FC84C3024999406B47F2098B5983CC3CAF79F92332074B9872E429CBE8EF12D5092628E4D4A39CBDDFCAAB2E382229CF09A5B10243340C1A7A0C5CBC14C704FCE873571524A5B038F1781CD31A4D8E2C48E02E63A2746E668273BE9D63937B88D8C864CE439528EB13BDFAC3E52EE4B8CB75B4ED65A7C97B42E5DAEE3E41D2331B06FFFBA71BECD9B96AEEB969670FC3869CC59050FD6DFA3245719531410402
2250232266247291151DEFAULT_TENANT', now(), true, 'DEFAULT_TENANT'); +insert into public.license_key2 (id, license_key, set_time, cluster_id, license_used) values (nextval('hibernate_sequence'), 'D041F44269EAFF1AF7C37ACAA86B7D9CBED89547431E777B797220CF62FE5D6A27C66BEBEAB8F4C89EA5379009C90CDEBFFAE307B7AEB897DC4D8CEAB61654340BB746B0B46679A9FB4791C777BAEBA176308F6BEB1654CE43D4E80E6D0F80CEC00B1EC30E7DA4BB8D3159133EF98AEB50617107DB77BE94676E0D4AA04ADA3B11A66824DB89A60C52BC1AB92926F10189DBBA6210B31478F48CF87B5D754F1A7C6BED0D1637742179DBF7BE82B3B3357AEA82CFAAD9126E39C4E19BABCB1CBDDB816C86A8F7C476D963265720383B627800775B0C9116D67CE5CB7CFC71D0A8A36623965EBB18A5BE1816FB1FAAAEAC361D2ABBC7344EC0B6C61E0395115B13FFFFFF03DEF34E840F2ED2AC84AC44DF368362366124308470063002498494067338303241077065122260378200508377102354337080160182150254091118451110391059070094162363290186239455351194330333503046082379128006166220287276298120398066372099177432015458270176242025196335311342039022343475412085392206244005184417460227292375103433217376511140361223163316121467443014486278407389237024349111268136424371062035285300509195050441367478101310353464249250399393211468032382017479033204215420319027225173414447170427346074048078201158299332476339297492269181214328291096331271222221199421106169418137405411466364104047152090465446480302462385088114481261428257207129020358100073347153355274495263056109229159157348228275180360410147142130230179450079472482323145202198010119F9BFDDF3C203A7E537AB046811BB7CEA37AB046811BB7CEA37AB046811BB7CEAE012403885A8163C0E3E14D7AD6207B5E8CE91579501D84B09D6682339A4DB462F479FFE1B232AFB3D19E925768AF0AA3E62D9AB6F9CEADDB1CDCA351CAA90996631814A556C47270431A6A40891F756FDDCA7BDD05C62A2932F8E77979E0D43C9F12565B1F4BB4F0520B44CC76BAC23F65330AC5966D22B209F32126132F4848E500A013F4DC32306A9620394D40C94B8EBC2406B68EBE31DAB17EF2DF977731A5C41C11311DC36E1FB8BC2529D1AA20D5D46919472212D781B1D77378872CBD14C2A5B783C7ADF0D2680946C52E56E186A7E971E7EAB2CF09511361DD892B5D4A113E8A2C60E3F7FEFA4100753D
82B7064101002937733CE0285C73130635F0CBBDF6F1160C2917B2DF9B1C391A8E9D7D9F380BF31A77A84017D0DF26B35BED6B2D145A051EB4345DA90241CA997828B8393ACD5C7316594634356CCC3986EFDD7776AC62C65E500ED125097142489479219130046503035CloudMOA', now(), null, true); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); + +INSERT INTO public.report_template(id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) VALUES(nextval('hibernate_sequence'), 'admin', '2020-04-28 09:29:49.466', 'admin', '2020-04-28 09:29:49.466', '0 0 1 ? * * *', true, +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network 
Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s Cluster resource usage is displayed.

1. CPU Usage

${metricItem1587977724113}

2. Memory Usage

${metricItem1588037028605}

3. Network

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod


1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}





', 'cloudmoa Cluster Daily Report'); +INSERT INTO public.report_template (id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) +VALUES(nextval('hibernate_sequence'), 'admin', '2020-01-20 01:17:50.182', 'admin', '2020-04-29 08:01:40.841', '0 0 9 ? * * *', false, +'[{"id":"metricItem1579497906163","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_cpu_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1579497916213","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_memory_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Memory Usage (%)","displayType":"bar","unit":"%","data":""},{"id":"metricItem1579497928963","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_network_receive","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Network Receive (KiB)","displayType":"pie","unit":"%","data":""},{"id":"metricItem1579497947243","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_load5","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Load 5m Average","displayType":"table","unit":"%","data":""}]', +'

1. editor usage

Let''s write the editor.

1.1 Text Decoration

Bold
Itelic
Strike


1.2 Color and blockquote

What''s your color?

Today is the first day of the rest of your life

1.3 List

  • Apple
  • Banana

  1. postgre
  2. cassandra
  3. prometheus

[ TODO List ]
  • Create DB table
  • Charge file name

1.4 Link, Table, Image




Deamonset NameAgeNamespaceLabelsImageCPUMemory
imxc-agent5
day
imxcimxc-agentregistry.openstacklocal:5000/imxc/imxc-agent:latest83.151.68
GiB
kube-flannel-ds-amd643
month
kube-systemflannelnodequay.io/coreos/flannel:v0.11.0-amd641.0790.88
MiB
kube-proxy10
month
kube-systemkube-proxyk8s.gcr.io/kube-proxy:v1.16.01.18117.66
MiB
node-exporter10
month
defaultnode-exporternode-exporterprom/node-exporter4.7697.54
MiB

exem.jpg

1.6 Metric Item

${metricItem1579497906163}
${metricItem1579497916213}
${metricItem1579497928963}
${metricItem1579497947243}



















', 'Editor usage example'); + +INSERT INTO public.report_static(id, created_by, created_date, modified_by, modified_date, cron_exp, metric_data, template_data, title, "type", report_template_id) VALUES(10582051, 'admin', '2020-04-29 08:27:52.545', 'admin', '2020-04-29 08:27:52.545', '0 0 1 ? * * *', +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s cluster resource usage flow is shown.

1. CPU Usage

Abnormally high CPU usage by particular programs can be an indication that there is something wrong with the computer system.

${metricItem1587977724113}

2. Memory Usage

The Memory Usage window displays the amount of memory available on your system, as well as the memory currently in use by all applications, including Windows itself.

${metricItem1588037028605}

3. Network

A network transmit/receive provides basic network utilization data in relation to the available network capacity.

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod

1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}







', +'cloudmoa Cluster Daily Report', 'manual', (select id from report_template where title='cloudmoa Cluster Daily Report')); + +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency 
(ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', (select id from auth_resource2 where name='CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service 
TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', (select id from auth_resource2 where name='Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', 
'[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', +(select id from auth_resource3 where 
name='dashboard|admin|CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', +(select id from auth_resource3 where name='dashboard|admin|Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, 
code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, 
code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'cloudmoa-trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'cmoa-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream 
Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +insert into public.log_management (cluster_id, node_id, log_rotate_dir, log_rotate_count, log_rotate_size, log_rotate_management, back_up_dir, back_up_period, back_up_dir_size, back_up_management, created_date, modified_date) values ('cloudmoa', '', '/var/lib/docker', 3, 100, true, '/home/moa/log', 5, 1000, true, '2020-07-30 13:54:52', null); + +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (5, 'metrics-server', 'agent', 'Metrcis-Server는 Kubernetes의 kubelet에 있는 cAdvisor로부터 Container Metric 데이터를 수집하여 Prometheus에 전달하는 역할을 합니다.', null, '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: 
["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + 
serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1', true, '2021-03-11 13:41:48.000000', '2021-03-11 13:41:56.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. +', null, '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', null, '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: 
+ cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', null, '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - 
extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + 
containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.16', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + 
metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: 
; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (3, 'prometheus', 
'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.15', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: 
__meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + 
restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: 
${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); + + +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"', true); + + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), 
param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the 
option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission 
cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml new file mode 100644 index 0000000..a5d4032 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 4.0.0 +description: Modified Authentication Module By EXEM CloudMOA +home: https://www.keycloak.org/ +icon: https://www.keycloak.org/resources/images/keycloak_logo_480x108.png +keywords: +- sso +- idm +- openid connect +- saml +- kerberos +- ldap +maintainers: +- email: unguiculus@gmail.com + name: unguiculus +- email: thomas.darimont+github@gmail.com + name: thomasdarimont +name: keycloak +sources: +- https://github.com/codecentric/helm-charts +- https://github.com/jboss-dockerfiles/keycloak +- https://github.com/bitnami/charts/tree/master/bitnami/postgresql +version: 11.0.1 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/OWNERS b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/OWNERS new file mode 100644 index 0000000..8c2ff0d --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/OWNERS @@ -0,0 +1,6 @@ +approvers: + - unguiculus + - 
thomasdarimont +reviewers: + - unguiculus + - thomasdarimont diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/README.md b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/README.md new file mode 100644 index 0000000..5f8da10 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/README.md @@ -0,0 +1,765 @@ +# Keycloak + +[Keycloak](http://www.keycloak.org/) is an open source identity and access management for modern applications and services. + +## TL;DR; + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Introduction + +This chart bootstraps a [Keycloak](http://www.keycloak.org/) StatefulSet on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +It provisions a fully featured Keycloak installation. +For more information on Keycloak and its capabilities, see its [documentation](http://www.keycloak.org/documentation.html). + +## Prerequisites Details + +The chart has an optional dependency on the [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart. +By default, the PostgreSQL chart requires PV support on underlying infrastructure (may be disabled). + +## Installing the Chart + +To install the chart with the release name `keycloak`: + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Uninstalling the Chart + +To uninstall the `keycloak` deployment: + +```console +$ helm uninstall keycloak +``` + +## Configuration + +The following table lists the configurable parameters of the Keycloak chart and their default values. 
+ +| Parameter | Description | Default | +|---|---|---| +| `fullnameOverride` | Optionally override the fully qualified name | `""` | +| `nameOverride` | Optionally override the name | `""` | +| `replicas` | The number of replicas to create | `1` | +| `image.repository` | The Keycloak image repository | `docker.io/jboss/keycloak` | +| `image.tag` | Overrides the Keycloak image tag whose default is the chart version | `""` | +| `image.pullPolicy` | The Keycloak image pull policy | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets for the Pod | `[]` | +| `hostAliases` | Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files | `[]` | +| `enableServiceLinks` | Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links | `true` | +| `podManagementPolicy` | Pod management policy. One of `Parallel` or `OrderedReady` | `Parallel` | +| `restartPolicy` | Pod restart policy. One of `Always`, `OnFailure`, or `Never` | `Always` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | `""` | +| `serviceAccount.annotations` | Additional annotations for the ServiceAccount | `{}` | +| `serviceAccount.labels` | Additional labels for the ServiceAccount | `{}` | +| `serviceAccount.imagePullSecrets` | Image pull secrets that are attached to the ServiceAccount | `[]` | +| `rbac.create` | Specifies whether RBAC resources are to be created | `false` +| `rbac.rules` | Custom RBAC rules, e. g. for KUBE_PING | `[]` +| `podSecurityContext` | SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. 
This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) | `{"fsGroup":1000}` | +| `securityContext` | SecurityContext for the Keycloak container | `{"runAsNonRoot":true,"runAsUser":1000}` | +| `extraInitContainers` | Additional init containers, e. g. for providing custom themes | `[]` | +| `extraContainers` | Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy | `[]` | +| `lifecycleHooks` | Lifecycle hooks for the Keycloak container | `{}` | +| `terminationGracePeriodSeconds` | Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance | `60` | +| `clusterDomain` | The internal Kubernetes cluster domain | `cluster.local` | +| `command` | Overrides the default entrypoint of the Keycloak container | `[]` | +| `args` | Overrides the default args for the Keycloak container | `[]` | +| `extraEnv` | Additional environment variables for Keycloak | `""` | +| `extraEnvFrom` | Additional environment variables for Keycloak mapped from a Secret or ConfigMap | `""` | +| `priorityClassName` | Pod priority class name | `""` | +| `affinity` | Pod affinity | Hard node and soft zone anti-affinity | +| `nodeSelector` | Node labels for Pod assignment | `{}` | +| `tolerations` | Node taints to tolerate | `[]` | +| `podLabels` | Additional Pod labels | `{}` | +| `podAnnotations` | Additional Pod annotations | `{}` | +| `livenessProbe` | Liveness probe configuration | `{"httpGet":{"path":"/health/live","port":"http"},"initialDelaySeconds":300,"timeoutSeconds":5}` | +| `readinessProbe` | Readiness probe configuration | `{"httpGet":{"path":"/auth/realms/master","port":"http"},"initialDelaySeconds":30,"timeoutSeconds":1}` | +| `resources` | Pod resource requests and limits | `{}` | +| `startupScripts` | Startup scripts to run before Keycloak starts 
up | `{"keycloak.cli":"{{- .Files.Get "scripts/keycloak.cli" \| nindent 2 }}"}` | +| `extraVolumes` | Add additional volumes, e. g. for custom themes | `""` | +| `extraVolumeMounts` | Add additional volumes mounts, e. g. for custom themes | `""` | +| `extraPorts` | Add additional ports, e. g. for admin console or exposing JGroups ports | `[]` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `statefulsetAnnotations` | Annotations for the StatefulSet | `{}` | +| `statefulsetLabels` | Additional labels for the StatefulSet | `{}` | +| `secrets` | Configuration for secrets that should be created | `{}` | +| `service.annotations` | Annotations for headless and HTTP Services | `{}` | +| `service.labels` | Additional labels for headless and HTTP Services | `{}` | +| `service.type` | The Service type | `ClusterIP` | +| `service.loadBalancerIP` | Optional IP for the load balancer. Used for services of type LoadBalancer only | `""` | +| `loadBalancerSourceRanges` | Optional List of allowed source ranges (CIDRs). Used for service of type LoadBalancer only | `[]` | +| `service.httpPort` | The http Service port | `80` | +| `service.httpNodePort` | The HTTP Service node port if type is NodePort | `""` | +| `service.httpsPort` | The HTTPS Service port | `8443` | +| `service.httpsNodePort` | The HTTPS Service node port if type is NodePort | `""` | +| `service.httpManagementPort` | The WildFly management Service port | `8443` | +| `service.httpManagementNodePort` | The WildFly management node port if type is NodePort | `""` | +| `service.extraPorts` | Additional Service ports, e. g. for custom admin console | `[]` | +| `service.sessionAffinity` | sessionAffinity for Service, e. g. 
"ClientIP" | `""` | +| `service.sessionAffinityConfig` | sessionAffinityConfig for Service | `{}` | +| `ingress.enabled` | If `true`, an Ingress is created | `false` | +| `ingress.rules` | List of Ingress Ingress rule | see below | +| `ingress.rules[0].host` | Host for the Ingress rule | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.rules[0].paths` | Paths for the Ingress rule | `[/]` | +| `ingress.servicePort` | The Service port targeted by the Ingress | `http` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Additional Ingress labels | `{}` | +| `ingress.tls` | TLS configuration | see below | +| `ingress.tls[0].hosts` | List of TLS hosts | `[keycloak.example.com]` | +| `ingress.tls[0].secretName` | Name of the TLS secret | `""` | +| `ingress.console.enabled` | If `true`, an Ingress for the console is created | `false` | +| `ingress.console.rules` | List of Ingress Ingress rule for the console | see below | +| `ingress.console.rules[0].host` | Host for the Ingress rule for the console | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.console.rules[0].paths` | Paths for the Ingress rule for the console | `[/auth/admin]` | +| `ingress.console.annotations` | Ingress annotations for the console | `{}` | +| `networkPolicy.enabled` | If true, the ingress network policy is deployed | `false` +| `networkPolicy.extraFrom` | Allows to define allowed external traffic (see Kubernetes doc for network policy `from` format) | `[]` +| `route.enabled` | If `true`, an OpenShift Route is created | `false` | +| `route.path` | Path for the Route | `/` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Additional Route labels | `{}` | +| `route.host` | Host name for the Route | `""` | +| `route.tls.enabled` | If `true`, TLS is enabled for the Route | `true` | +| `route.tls.insecureEdgeTerminationPolicy` | Insecure edge termination policy of the Route. 
Can be `None`, `Redirect`, or `Allow` | `Redirect` | +| `route.tls.termination` | TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` | `edge` | +| `pgchecker.image.repository` | Docker image used to check Postgresql readiness at startup | `docker.io/busybox` | +| `pgchecker.image.tag` | Image tag for the pgchecker image | `1.32` | +| `pgchecker.image.pullPolicy` | Image pull policy for the pgchecker image | `IfNotPresent` | +| `pgchecker.securityContext` | SecurityContext for the pgchecker container | `{"allowPrivilegeEscalation":false,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | +| `pgchecker.resources` | Resource requests and limits for the pgchecker container | `{"limits":{"cpu":"10m","memory":"16Mi"},"requests":{"cpu":"10m","memory":"16Mi"}}` | +| `postgresql.enabled` | If `true`, the Postgresql dependency is enabled | `true` | +| `postgresql.postgresqlUsername` | PostgreSQL User to create | `keycloak` | +| `postgresql.postgresqlPassword` | PostgreSQL Password for the new user | `keycloak` | +| `postgresql.postgresqlDatabase` | PostgreSQL Database to create | `keycloak` | +| `serviceMonitor.enabled` | If `true`, a ServiceMonitor resource for the prometheus-operator is created | `false` | +| `serviceMonitor.namespace` | Optionally sets a target namespace in which to deploy the ServiceMonitor resource | `""` | +| `serviceMonitor.namespaceSelector` | Optionally sets a namespace selector for the ServiceMonitor | `{}` | +| `serviceMonitor.annotations` | Annotations for the ServiceMonitor | `{}` | +| `serviceMonitor.labels` | Additional labels for the ServiceMonitor | `{}` | +| `serviceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `serviceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `serviceMonitor.path` | The path at which metrics are served | `/metrics` | +| `serviceMonitor.port` | The Service port at which metrics are served | `http` | +| `extraServiceMonitor.enabled` | If `true`, 
an additional ServiceMonitor resource for the prometheus-operator is created. Could be used for additional metrics via [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) | `false` | +| `extraServiceMonitor.namespace` | Optionally sets a target namespace in which to deploy the additional ServiceMonitor resource | `""` | +| `extraServiceMonitor.namespaceSelector` | Optionally sets a namespace selector for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.annotations` | Annotations for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.labels` | Additional labels for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `extraServiceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `extraServiceMonitor.path` | The path at which metrics are served | `/metrics` | +| `extraServiceMonitor.port` | The Service port at which metrics are served | `http` | +| `prometheusRule.enabled` | If `true`, a PrometheusRule resource for the prometheus-operator is created | `false` | +| `prometheusRule.annotations` | Annotations for the PrometheusRule | `{}` | +| `prometheusRule.labels` | Additional labels for the PrometheusRule | `{}` | +| `prometheusRule.rules` | List of rules for Prometheus | `[]` | +| `autoscaling.enabled` | Enable creation of a HorizontalPodAutoscaler resource | `false` | +| `autoscaling.labels` | Additional labels for the HorizontalPodAutoscaler resource | `{}` | +| `autoscaling.minReplicas` | The minimum number of Pods when autoscaling is enabled | `3` | +| `autoscaling.maxReplicas` | The maximum number of Pods when autoscaling is enabled | `10` | +| `autoscaling.metrics` | The metrics configuration for the HorizontalPodAutoscaler | `[{"resource":{"name":"cpu","target":{"averageUtilization":80,"type":"Utilization"}},"type":"Resource"}]` | +| `autoscaling.behavior` | The scaling policy configuration for the HorizontalPodAutoscaler | 
`{"scaleDown":{"policies":[{"periodSeconds":300,"type":"Pods","value":1}],"stabilizationWindowSeconds":300}` | +| `test.enabled` | If `true`, test resources are created | `false` | +| `test.image.repository` | The image for the test Pod | `docker.io/unguiculus/docker-python3-phantomjs-selenium` | +| `test.image.tag` | The tag for the test Pod image | `v1` | +| `test.image.pullPolicy` | The image pull policy for the test Pod image | `IfNotPresent` | +| `test.podSecurityContext` | SecurityContext for the entire test Pod | `{"fsGroup":1000}` | +| `test.securityContext` | SecurityContext for the test container | `{"runAsNonRoot":true,"runAsUser":1000}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --set replicas=1 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --values values.yaml +``` + +The chart offers great flexibility. +It can be configured to work with the official Keycloak Docker image but any custom image can be used as well. + +For the offical Docker image, please check it's configuration at https://github.com/keycloak/keycloak-containers/tree/master/server. + +### Usage of the `tpl` Function + +The `tpl` function allows us to pass string values from `values.yaml` through the templating engine. +It is used for the following values: + +* `extraInitContainers` +* `extraContainers` +* `extraEnv` +* `extraEnvFrom` +* `affinity` +* `extraVolumeMounts` +* `extraVolumes` +* `livenessProbe` +* `readinessProbe` + +Additionally, custom labels and annotations can be set on various resources the values of which being passed through `tpl` as well. + +It is important that these values be configured as strings. +Otherwise, installation will fail. 
+See example for Google Cloud Proxy or default affinity configuration in `values.yaml`. + +### JVM Settings + +Keycloak sets the following system properties by default: +`-Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS -Djava.awt.headless=true` + +You can override these by setting the `JAVA_OPTS` environment variable. +Make sure you configure container support. +This allows you to only configure memory using Kubernetes resources and the JVM will automatically adapt. + +```yaml +extraEnv: | + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true +``` + +### Database Setup + +By default, Bitnami's [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart is deployed and used as database. +Please refer to this chart for additional PostgreSQL configuration options. + +#### Using an External Database + +The Keycloak Docker image supports various database types. +Configuration happens in a generic manner. + +##### Using a Secret Managed by the Chart + +The following examples uses a PostgreSQL database with a secret that is managed by the Helm chart. + +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + +extraEnvFrom: | + - secretRef: + name: '{{ include "keycloak.fullname" . }}-db' + +secrets: + db: + stringData: + DB_USER: '{{ .Values.dbUser }}' + DB_PASSWORD: '{{ .Values.dbPassword }}' +``` + +`dbUser` and `dbPassword` are custom values you'd then specify on the commandline using `--set-string`. + +##### Using an Existing Secret + +The following examples uses a PostgreSQL database with a secret. +Username and password are mounted as files. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + - name: DB_USER_FILE + value: /secrets/db-creds/user + - name: DB_PASSWORD_FILE + value: /secrets/db-creds/password + +extraVolumeMounts: | + - name: db-creds + mountPath: /secrets/db-creds + readOnly: true + +extraVolumes: | + - name: db-creds + secret: + secretName: keycloak-db-creds +``` + +### Creating a Keycloak Admin User + +The Keycloak Docker image supports creating an initial admin user. +It must be configured via environment variables: + +* `KEYCLOAK_USER` or `KEYCLOAK_USER_FILE` +* `KEYCLOAK_PASSWORD` or `KEYCLOAK_PASSWORD_FILE` + +Please refer to the section on database configuration for how to configure a secret for this. + +### High Availability and Clustering + +For high availability, Keycloak must be run with multiple replicas (`replicas > 1`). +The chart has a helper template (`keycloak.serviceDnsName`) that creates the DNS name based on the headless service. + +#### DNS_PING Service Discovery + +JGroups discovery via DNS_PING can be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +#### KUBE_PING Service Discovery + +Recent versions of Keycloak include a new Kubernetes native [KUBE_PING](https://github.com/jgroups-extras/jgroups-kubernetes) service discovery protocol. +This requires a little more configuration than DNS_PING but can easily be achieved with the Helm chart. 
+ +As with DNS_PING some environment variables must be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +However, the Keycloak Pods must also get RBAC permissions to `get` and `list` Pods in the namespace which can be configured as follows: + +```yaml +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +``` + +#### Autoscaling + +Due to the caches in Keycloak only replicating to a few nodes (two in the example configuration above) and the limited controls around autoscaling built into Kubernetes, it has historically been problematic to autoscale Keycloak. +However, in Kubernetes 1.18 [additional controls were introduced](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior) which make it possible to scale down in a more controlled manner. + +The example autoscaling configuration in the values file scales from three up to a maximum of ten Pods using CPU utilization as the metric. Scaling up is done as quickly as required but scaling down is done at a maximum rate of one Pod per five minutes. + +Autoscaling can be enabled as follows: + +```yaml +autoscaling: + enabled: true +``` + +KUBE_PING service discovery seems to be the most reliable mechanism to use when enabling autoscaling, due to being faster than DNS_PING at detecting changes in the cluster. 
+ +### Running Keycloak Behind a Reverse Proxy + +When running Keycloak behind a reverse proxy, which is the case when using an ingress controller, +proxy address forwarding must be enabled as follows: + +```yaml +extraEnv: | + - name: PROXY_ADDRESS_FORWARDING + value: "true" +``` + +### Providing a Custom Theme + +One option is certainly to provide a custom Keycloak image that includes the theme. +However, if you prefer to stick with the official Keycloak image, you can use an init container as theme provider. + +Create your own theme and package it up into a Docker image. + +```docker +FROM busybox +COPY mytheme /mytheme +``` + +In combination with an `emptyDir` that is shared with the Keycloak container, configure an init container that runs your theme image and copies the theme over to the right place where Keycloak will pick it up automatically. + +```yaml +extraInitContainers: | + - name: theme-provider + image: myuser/mytheme:1 + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes/mytheme + +extraVolumes: | + - name: theme + emptyDir: {} +``` + +### Setting a Custom Realm + +A realm can be added by creating a secret or configmap for the realm json file and then supplying this into the chart. +It can be mounted using `extraVolumeMounts` and then referenced as environment variable `KEYCLOAK_IMPORT`. 
+First we need to create a Secret from the realm JSON file using `kubectl create secret generic realm-secret --from-file=realm.json` which we need to reference in `values.yaml`: + +```yaml +extraVolumes: | + - name: realm-secret + secret: + secretName: realm-secret + +extraVolumeMounts: | + - name: realm-secret + mountPath: "/realm/" + readOnly: true + +extraEnv: | + - name: KEYCLOAK_IMPORT + value: /realm/realm.json +``` + +Alternatively, the realm file could be added to a custom image. + +After startup the web admin console for the realm should be available on the path /auth/admin/\/console/. + +### Using Google Cloud SQL Proxy + +Depending on your environment you may need a local proxy to connect to the database. +This is, e. g., the case for Google Kubernetes Engine when using Google Cloud SQL. +Create the secret for the credentials as documented [here](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) and configure the proxy as a sidecar. + +Because `extraContainers` is a string that is passed through the `tpl` function, it is possible to create custom values and use them in the string. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +# Custom values for Google Cloud SQL +cloudsql: + project: my-project + region: europe-west1 + instance: my-instance + +extraContainers: | + - name: cloudsql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - /cloud_sql_proxy + args: + - -instances={{ .Values.cloudsql.project }}:{{ .Values.cloudsql.region }}:{{ .Values.cloudsql.instance }}=tcp:5432 + - -credential_file=/secrets/cloudsql/credentials.json + volumeMounts: + - name: cloudsql-creds + mountPath: /secrets/cloudsql + readOnly: true + +extraVolumes: | + - name: cloudsql-creds + secret: + secretName: cloudsql-instance-credentials + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: "127.0.0.1" + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: postgres + - name: DB_USER + value: myuser + - name: DB_PASSWORD + value: mypassword +``` + +### Changing the Context Path + +By default, Keycloak is served under context `/auth`. 
+This can be changed as follows: + +```yaml +contextPath: mycontext + +startupScripts: + # cli script that reconfigures WildFly + contextPath.cli: | + embed-server --server-config=standalone-ha.xml --std-out=echo + batch + {{- if ne .Values.contextPath "auth" }} + /subsystem=keycloak-server/:write-attribute(name=web-context,value={{ if eq .Values.contextPath "" }}/{{ else }}{{ .Values.contextPath }}{{ end }}) + {{- if eq .Values.contextPath "" }} + /subsystem=undertow/server=default-server/host=default-host:write-attribute(name=default-web-module,value=keycloak-server.war) + {{- end }} + {{- end }} + run-batch + stop-embedded-server + +livenessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +readinessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +The above YAML references introduces the custom value `contextPath` which is possible because `startupScripts`, `livenessProbe`, and `readinessProbe` are templated using the `tpl` function. +Note that it must not start with a slash. +Alternatively, you may supply it via CLI flag: + +```console +--set-string contextPath=mycontext +``` + +### Prometheus Metrics Support + +#### WildFly Metrics + +WildFly can expose metrics on the management port. +In order to achieve this, the environment variable `KEYCLOAK_STATISTICS` must be set. + +```yaml +extraEnv: | + - name: KEYCLOAK_STATISTICS + value: all +``` + +Add a ServiceMonitor if using prometheus-operator: + +```yaml +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing the ServiceMonitor and for adding custom Prometheus rules. 
+
+Add annotations if you don't use prometheus-operator:
+
+```yaml
+service:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9990"
+```
+
+#### Keycloak Metrics SPI
+
+Optionally, it is possible to add [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) via init container.
+
+A separate `ServiceMonitor` can be enabled to scrape metrics from the SPI:
+
+```yaml
+extraServiceMonitor:
+  # If `true`, an additional ServiceMonitor resource for the prometheus-operator is created
+  enabled: true
+```
+
+Check out `values.yaml` for customizing this ServiceMonitor.
+
+Note that the metrics endpoint is exposed on the HTTP port.
+You may want to restrict access to it in your ingress controller configuration.
+For ingress-nginx, this could be done as follows:
+
+```yaml
+annotations:
+  nginx.ingress.kubernetes.io/server-snippet: |
+    location ~* /auth/realms/[^/]+/metrics {
+      return 403;
+    }
+```
+
+## Why StatefulSet?
+
+The chart sets node identifiers to the system property `jboss.node.name` which is in fact the pod name.
+Node identifiers must not be longer than 23 characters.
+This can be problematic because pod names are quite long.
+We would have to truncate the chart's fullname to six characters because pods get a 17-character suffix (e. g. `-697f8b7655-mf5ht`).
+Using a StatefulSet allows us to truncate to 20 characters leaving room for up to 99 replicas, which is much better.
+Additionally, we get stable values for `jboss.node.name` which can be advantageous for cluster discovery.
+The headless service that governs the StatefulSet is used for DNS discovery via DNS_PING.
+
+## Upgrading
+
+### From chart < 10.0.0
+
+* Keycloak is updated to 12.0.4
+
+The upgrade should be seamless.
+No special care has to be taken.
+
+### From chart versions < 9.0.0
+
+The Keycloak chart received a major facelift and, thus, comes with breaking changes.
+Opinionated stuff and things that are now baked into Keycloak's Docker image were removed.
+Configuration is more generic, making it easier to use custom Docker images that are configured differently than the official one.
+
+* Values are no longer nested under `keycloak`.
+* Besides setting the node identifier, no CLI changes are performed out of the box
+* Environment variables for the PostgreSQL dependency are set automatically if enabled.
+  Otherwise, no environment variables are set by default.
+* Optionally enables creating RBAC resources with configurable rules (e. g. for KUBE_PING)
+* PostgreSQL chart dependency is updated to 9.1.1
+
+### From chart versions < 8.0.0
+
+* Keycloak is updated to 10.0.0
+* PostgreSQL chart dependency is updated to 8.9.5
+
+The upgrade should be seamless.
+No special care has to be taken.
+
+### From chart versions < 7.0.0
+
+Version 7.0.0 update breaks backwards-compatibility with the existing `keycloak.persistence.existingSecret` scheme.
+
+#### Changes in Configuring Database Credentials from an Existing Secret
+
+Both `DB_USER` and `DB_PASS` are always read from a Kubernetes Secret.
+This is a requirement if you are provisioning database credentials dynamically - either via an Operator or some secret-management engine.
+
+The variable referencing the password key name has been renamed from `keycloak.persistence.existingSecretKey` to `keycloak.persistence.existingSecretPasswordKey`
+
+A new, optional variable for referencing the username key name for populating the `DB_USER` env has been added:
+`keycloak.persistence.existingSecretUsernameKey`.
+
+If `keycloak.persistence.existingSecret` is left unset, a new Secret will be provisioned populated with the `dbUser` and `dbPassword` Helm variables.
+
+###### Example configuration:
+```yaml
+keycloak:
+  persistence:
+    existingSecret: keycloak-provisioned-db-credentials
+    existingSecretPasswordKey: PGPASSWORD
+    existingSecretUsernameKey: PGUSER
+    ...
+``` +### From chart versions < 6.0.0 + +#### Changes in Probe Configuration + +Now both readiness and liveness probes are configured as strings that are then passed through the `tpl` function. +This allows for greater customizability of the readiness and liveness probes. + +The defaults are unchanged, but since 6.0.0 configured as follows: + +```yaml + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +#### Changes in Existing Secret Configuration + +This can be useful if you create a secret in a parent chart and want to reference that secret. +Applies to `keycloak.existingSecret` and `keycloak.persistence.existingSecret`. + +_`values.yaml` of parent chart:_ +```yaml +keycloak: + keycloak: + existingSecret: '{{ .Release.Name }}-keycloak-secret' +``` + +#### HTTPS Port Added + +The HTTPS port was added to the pod and to the services. +As a result, service ports are now configured differently. + + +### From chart versions < 5.0.0 + +Version 5.0.0 is a major update. + +* The chart now follows the new Kubernetes label recommendations: +https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +* Several changes to the StatefulSet render an out-of-the-box upgrade impossible because StatefulSets only allow updates to a limited set of fields +* The chart uses the new support for running scripts at startup that has been added to Keycloak's Docker image. +If you use this feature, you will have to adjust your configuration + +However, with the following manual steps an automatic upgrade is still possible: + +1. Adjust chart configuration as necessary (e. g. startup scripts) +1. 
Perform a non-cascading deletion of the StatefulSet which keeps the pods running +1. Add the new labels to the pods +1. Run `helm upgrade` + +Use a script like the following to add labels and to delete the StatefulSet: + +```console +#!/bin/sh + +release= +namespace= + +kubectl delete statefulset -n "$namespace" -l app=keycloak -l release="$release" --cascade=false + +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/name=keycloak +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/instance="$release" +``` + +**NOTE:** Version 5.0.0 also updates the Postgresql dependency which has received a major upgrade as well. +In case you use this dependency, the database must be upgraded first. +Please refer to the Postgresql chart's upgrading section in its README for instructions. diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..48d8f2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.8.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.1.1 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..c84cc7b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md @@ -0,0 +1,625 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+
+For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha)
+
+## TL;DR;
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.12+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install my-release bitnami/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release.
+
+To delete the PVC's associated with `my-release`:
+
+```console
+$ kubectl delete pvc -l release=my-release
+```
+
+> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it.
+
+## Parameters
+
+The following table lists the configurable parameters of the PostgreSQL chart and their default values.
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` |
+| `ldap.tls` | Set to `1` to use TLS encryption | `nil` |
+| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` |
+| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` |
+| `ldap.search_attr` | Attribute to match against the user name in the search | `nil` |
+| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` |
+| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` |
+| `ldap.bindDN` | DN of user to bind to LDAP | `nil` |
+| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` |
+| `replication.enabled` | Enable replication | `false` |
+| `replication.user` | Replication user | `repl_user` |
+| `replication.password` | Replication user password | `repl_password` |
+| `replication.slaveReplicas` | Number of slave replicas | `1` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate on LDAP. The value is evaluated as a template. | `nil` |
+| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case `postgres` is the admin username).
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. 
|`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. 
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. 
It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. 
Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinty` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After run above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing to the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..b4d8828 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 0.3.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.3.1 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md new file mode 100644 index 0000000..ab50967 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md @@ -0,0 +1,228 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR; + +```yaml +dependencies: + - name: common + version: 0.1.0 + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +**Names** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +**TplValues** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +**Secrets** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +## Notable changes + +N/A diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..c0ea2c7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..ee6673a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if 
.global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $) }} +*/}} +{{- define "common.images.pullSecrets" -}} +{{- if .global }} +{{- if .global.imagePullSecrets }} +imagePullSecrets: + {{- range .global.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- else }} +{{- $pullSecrets := list }} +{{- range .images }} + {{- if .pullSecrets }} + {{- $pullSecrets = append $pullSecrets .pullSecrets }} + {{- end }} +{{- end }} +{{- if $pullSecrets }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..d6165a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file 
mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..a936299 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). 
diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock new file mode 100644 index 0000000..1069b62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.3.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-07-15T00:56:02.067804177Z" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..868eee6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt 
b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..6dec604 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,54 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.imxc.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.imxc.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace imxc --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . 
}} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . 
) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..a7008a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,494 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} 
+{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . 
| quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..b29ef60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..f21a976 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..6637867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . 
}} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6b7a317 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..b993c99 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..2a7b372 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..da0b3ab --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..b0c41b1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml new file mode 100644 index 0000000..ddd7d7c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: keycloak-saas +spec: + storageClassName: manual + capacity: + storage: 8Gi + accessModes: + - ReadWriteOnce + #- ReadWriteMany + hostPath: + #path: "/home/keycloak/keycloak" + path: /mnt/keycloak-postgresql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + #- imxc-worker1 + - {{ .Values.node.affinity }} + claimRef: + name: data-keycloak-saas-postgresql-0 + #namespace: auth + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..6d3cf50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..b7daa2a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: imxc +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..c93dbe0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..17f7ff3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..3e643e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - imxc + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..a712a03 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,340 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if 
.Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..35c6293 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: 
{{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + #claimName: {{ tpl . 
$ }} + claimName: data-keycloak-saas-postgresql-0 +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..4913157 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..885c7bb --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..e9fc504 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..a43670f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml @@ -0,0 +1,591 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: postgresql # bitnami/postgresql + tag: 11.8.0-debian-10-r61 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml new file mode 100644 index 0000000..5f831ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml @@ -0,0 +1,604 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + #registry: cdm-dev.exem-oss.org/keycloak + registry: 10.10.31.243:5000/keycloak # registry.openstacklocal:5000/keycloak + repository: keycloak-postgresql + tag: 11.8.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + #registry: cdm-dev.exem-oss.org + registry: 10.10.31.243:5000 # registry.openstacklocal:5000 + repository: minideb # keycloak/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data +#postgresqlDataDir: /var/lib/postgresql/data/pgdata + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + 
## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + 
requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just example rules, please adapt them to your needs. + ## Make sure to constrain the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +#persistentVolume nodeAffinity Value Require this value +node: + affinity: imxc-worker1 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml new file mode 100644 index 0000000..10d1705 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml @@ -0,0 +1,38 @@ +extraEnv: | + - name: DB_VENDOR + value: h2 + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + annotations: + my-test-annotation: Test secret for {{ include "keycloak.fullname" . }} + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: false + +test: + enabled: true diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml new file mode 100644 index 0000000..e92c2c7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml @@ -0,0 +1,73 @@ +replicas: 2 + +podLabels: + test-label: test-label-value + +podAnnotations: + test-annotation: test-annotation-value-{{ .Release.Name }} + test-int-annotation: "12345" + +startupScripts: + hello.sh: | + #!/bin/sh + + echo '********************************************************************************' + echo '* *' + echo '* Hello from my startup script! *' + echo '* *' + echo '********************************************************************************' + +lifecycleHooks: | + postStart: + exec: + command: + - /bin/sh + - -c + - echo 'Hello from lifecycle hook!' 
+ +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: KEYCLOAK_STATISTICS + value: all + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: true + persistence: + enabled: true + +test: + enabled: true diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.lock b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.lock new file mode 100644 index 0000000..4231a57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.1 +digest: sha256:33ee9e6caa9e519633071fd71aedd9de7906b9a9d7fb629eb814d9f72bb8d68e +generated: "2020-07-24T07:40:55.78753+02:00" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml new file mode 100644 index 0000000..f3409a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 9.1.1 + repository: 
https://charts.bitnami.com/bitnami + condition: postgresql.enabled diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli new file mode 100644 index 0000000..1469963 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli @@ -0,0 +1,13 @@ +embed-server --server-config=standalone-ha.xml --std-out=echo +batch + +echo Configuring node identifier + +## Sets the node identifier to the node name (= pod name). Node identifiers have to be unique. They can have a +## maximum length of 23 characters. Thus, the chart's fullname template truncates its length accordingly. +/subsystem=transactions:write-attribute(name=node-identifier, value=${jboss.node.name}) + +echo Finished configuring node identifier + +run-batch +stop-embedded-server diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt new file mode 100644 index 0000000..e76e064 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt @@ -0,0 +1,61 @@ +*********************************************************************** +* * +* Keycloak Helm Chart by codecentric AG * +* * +*********************************************************************** + +{{- if .Values.ingress.enabled }} + +Keycloak was installed with an Ingress and can be reached at the following URL(s): +{{ range $unused, $rule := .Values.ingress.rules }} + {{- range $rule.paths }} + - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $rule.host }}{{ . }} + {{- end }} +{{- end }} + +{{- else if eq "NodePort" .Values.service.type }} + +Keycloak was installed with a Service of type NodePort. 
+{{ if .Values.service.httpNodePort }} +Get its HTTP URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"http\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} +{{ if .Values.service.httpsNodePort }} +Get its HTTPS URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"https\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "https://$NODE_IP:$NODE_PORT" +{{- end }} + +{{- else if eq "LoadBalancer" .Values.service.type }} + +Keycloak was installed with a Service of type LoadBalancer + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace imxc service -w {{ include "keycloak.fullname" . }}' + +Get its HTTP URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpPort }}" + +Get its HTTPS URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . 
}}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "https://$SERVICE_IP:{{ .Values.service.httpsPort }}" + +{{- else if eq "ClusterIP" .Values.service.type }} + +Keycloak was installed with a Service of type ClusterIP + +Create a port-forwarding with the following commands: + +export POD_NAME=$(kubectl get pods --namespace imxc -l "app.kubernetes.io/name={{ include "keycloak.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o name) +echo "Visit http://127.0.0.1:8080 to use your application" +kubectl --namespace imxc port-forward "$POD_NAME" 8080 + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..d019e17 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "keycloak.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate to 20 characters because this is used to set the node identifier in WildFly which is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "keycloak.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "keycloak.labels" -}} +helm.sh/chart: {{ include "keycloak.chart" . }} +{{ include "keycloak.selectorLabels" . }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "keycloak.selectorLabels" -}} +app.kubernetes.io/name: {{ include "keycloak.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for the postgres requirement. +*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- $postgresContext := dict "Values" .Values.postgresql "Release" .Release "Chart" (dict "Name" "postgresql") -}} +{{ include "postgresql.fullname" $postgresContext }} +{{- end }} + +{{/* +Create the service DNS name. +*/}} +{{- define "keycloak.serviceDnsName" -}} +{{ include "keycloak.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "keycloak.ingressAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- print "networking.k8s.io/v1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml new file mode 100644 index 0000000..8fbb462 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml @@ -0,0 +1,14 @@ +{{- if .Values.startupScripts }} +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-startup + labels: + {{- include "keycloak.labels" . | nindent 4 }} +data: + {{- range $key, $value := .Values.startupScripts }} + {{ $key }}: | + {{- tpl $value $ | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml new file mode 100644 index 0000000..c772b76 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.autoscaling.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "keycloak.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d749e24 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml @@ -0,0 +1,104 @@ +{{- $ingress := .Values.ingress -}} +{{- if $ingress.enabled -}} +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- if $ingress.console.enabled }} +--- +apiVersion: {{ include "keycloak.ingressAPIVersion" . 
}} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }}-console + {{- with $ingress.console.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.console.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..5e7c7b6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "keycloak.fullname" . | quote }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.networkPolicy.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + ingress: + {{- with .Values.networkPolicy.extraFrom }} + - from: + {{- toYaml . | nindent 8 }} + ports: + - protocol: TCP + port: {{ $.Values.service.httpPort }} + - protocol: TCP + port: {{ $.Values.service.httpsPort }} + {{ range $.Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} + {{- end }} + - from: + - podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.service.httpPort }} + - protocol: TCP + port: {{ .Values.service.httpsPort }} + - protocol: TCP + port: {{ .Values.service.httpManagementPort }} + {{ range .Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..39cc390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml new file mode 100644 index 0000000..69af5e7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- with .Values.prometheusRule -}} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "keycloak.fullname" $ }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "keycloak.fullname" $ }} + rules: + {{- toYaml .rules | nindent 8 }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml new file mode 100644 index 0000000..9ca0a2b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create .Values.rbac.rules }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +rules: + {{- toYaml .Values.rbac.rules | nindent 2 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "keycloak.fullname" . 
}} +subjects: + - kind: ServiceAccount + name: {{ include "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml new file mode 100644 index 0000000..9507d56 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml @@ -0,0 +1,34 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $route.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $route.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $route.host }} + host: {{ tpl $route.host $ | quote }} +{{- end }} + path: {{ $route.path }} + port: + targetPort: http + to: + kind: Service + name: {{ include "keycloak.fullname" $ }}-http + weight: 100 + {{- if $route.tls.enabled }} + tls: + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + termination: {{ $route.tls.termination }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml new file mode 100644 index 0000000..c1cb796 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- range $nameSuffix, $values := .Values.secrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $nameSuffix }} + {{- with $values.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := $values.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +type: {{ default "Opaque" $values.type }} +{{- with $values.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with $values.stringData }} +stringData: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 2 }} + {{- end }} +{{- end }} +--- +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml new file mode 100644 index 0000000..0c22ec9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-headless + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: headless +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + selector: + {{- include "keycloak.selectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml new file mode 100644 index 0000000..c4a1dc9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-http + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.service.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: http +spec: + type: {{ .Values.service.type }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + protocol: TCP + - name: https + port: {{ .Values.service.httpsPort }} + targetPort: https + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpsNodePort }} + nodePort: {{ .Values.service.httpsNodePort }} + {{- end }} + protocol: TCP + - name: http-management + port: {{ .Values.service.httpManagementPort }} + targetPort: http-management + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpManagementNodePort }} + nodePort: {{ .Values.service.httpManagementNodePort }} + {{- end }} + protocol: TCP + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "keycloak.selectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..1d8f3f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "keycloak.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceAccount.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +imagePullSecrets: + {{- toYaml .Values.serviceAccount.imagePullSecrets | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..ba97f62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- range $key, $serviceMonitor := dict "wildfly" .Values.serviceMonitor "extra" .Values.extraServiceMonitor }} +{{- with $serviceMonitor }} +{{- if .enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $key }} + {{- with .namespace }} + namespace: {{ . }} + {{- end }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + {{- with .namespaceSelector }} + namespaceSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "keycloak.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/component: http + endpoints: + - port: {{ .port }} + path: {{ .path }} + interval: {{ .interval }} + scrapeTimeout: {{ .scrapeTimeout }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..8278986 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with .Values.statefulsetAnnotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.statefulsetLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + serviceName: {{ include "keycloak.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config-startup: {{ include (print .Template.BasePath "/configmap-startup.yaml") . 
| sha256sum }} + checksum/secrets: {{ tpl (toYaml .Values.secrets) . | sha256sum }} + {{- range $key, $value := .Values.podAnnotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + labels: + {{- include "keycloak.selectorLabels" . | nindent 8 }} + {{- if and .Values.postgresql.enabled (and .Values.postgresql.networkPolicy .Values.postgresql.networkPolicy.enabled) }} + {{ include "keycloak.postgresql.fullname" . }}-client: "true" + {{- end }} + {{- range $key, $value := .Values.podLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + spec: + {{- if or .Values.postgresql.enabled .Values.extraInitContainers }} + initContainers: + {{- if .Values.postgresql.enabled }} + - name: pgchecker + image: "{{ .Values.pgchecker.image.repository }}:{{ .Values.pgchecker.image.tag }}" + imagePullPolicy: {{ .Values.pgchecker.image.pullPolicy }} + securityContext: + {{- toYaml .Values.pgchecker.securityContext | nindent 12 }} + command: + - sh + - -c + - | + echo 'Waiting for PostgreSQL to become ready...' + + until printf "." && nc -z -w 2 {{ include "keycloak.postgresql.fullname" . }} {{ .Values.postgresql.service.port }}; do + sleep 2; + done; + + echo 'PostgreSQL OK ✓' + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + resources: + {{- toYaml .Values.pgchecker.resources | nindent 12 }} + {{- end }} + {{- with .Values.extraInitContainers }} + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: keycloak + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.command | nindent 12 }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- with .Values.lifecycleHooks }} + {{- tpl . 
$ | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_USER + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_USERNAME + - name: KEYCLOAK_PASSWORD + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_PASSWORD + {{- if .Values.postgresql.enabled }} + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: {{ include "keycloak.postgresql.fullname" . }} + - name: DB_PORT + value: {{ .Values.postgresql.service.port | quote }} + - name: DB_DATABASE + value: {{ .Values.postgresql.postgresqlDatabase | quote }} + - name: DB_USER + value: {{ .Values.postgresql.postgresqlUsername | quote }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.postgresql.fullname" . }} + key: postgresql-password + {{- end }} + {{- with .Values.extraEnv }} + {{- tpl . $ | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.extraEnvFrom }} + {{- tpl . $ | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: https + containerPort: 8443 + protocol: TCP + - name: http-management + containerPort: 9990 + protocol: TCP + {{- with .Values.extraPorts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + {{- range $key, $value := .Values.startupScripts }} + - name: startup + mountPath: "/opt/jboss/startup-scripts/{{ $key }}" + subPath: "{{ $key }}" + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- tpl . 
$ | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "keycloak.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + restartPolicy: {{ .Values.restartPolicy }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: themes-upper-directory + hostPath: + path: /root/oci/infra-set/keycloak/keycloak_theme/ + type: DirectoryOrCreate + {{- with .Values.startupScripts }} + - name: startup + configMap: + name: {{ include "keycloak.fullname" $ }}-startup + defaultMode: 0555 + items: + {{- range $key, $value := . }} + - key: {{ $key }} + path: {{ $key }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- tpl . $ | nindent 8 }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml new file mode 100644 index 0000000..8dda781 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml @@ -0,0 +1,50 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: test + helm.sh/hook-delete-policy: hook-succeeded +data: + test.py: | + import os + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions + from urllib.parse import urlparse + + print('Creating PhantomJS driver...') + driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log') + + base_url = 'http://{{ include "keycloak.fullname" . }}-http{{ if ne 80 (int .Values.service.httpPort) }}:{{ .Values.service.httpPort }}{{ end }}' + + print('Opening Keycloak...') + driver.get('{0}/auth/admin/'.format(base_url)) + + username = os.environ['KEYCLOAK_USER'] + password = os.environ['KEYCLOAK_PASSWORD'] + + username_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "username"))) + password_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "password"))) + login_button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "kc-login"))) + + print('Entering username...') + username_input.send_keys(username) + + print('Entering password...') + password_input.send_keys(password) + + print('Clicking login button...') + login_button.click() + + WebDriverWait(driver, 30).until(lambda driver: '/auth/admin/master/console/' in driver.current_url) + + print('Admin console visible. 
Login successful.') + + driver.quit() + + {{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml new file mode 100644 index 0000000..5b166f2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml @@ -0,0 +1,43 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: test + annotations: + helm.sh/hook: test +spec: + securityContext: + {{- toYaml .Values.test.podSecurityContext | nindent 4 }} + containers: + - name: keycloak-test + image: "{{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}" + imagePullPolicy: {{ .Values.test.image.pullPolicy }} + securityContext: + {{- toYaml .Values.test.securityContext | nindent 8 }} + command: + - python3 + args: + - /tests/test.py + env: + - name: KEYCLOAK_USER + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: user + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: password + volumeMounts: + - name: tests + mountPath: /tests + volumes: + - name: tests + configMap: + name: {{ include "keycloak.fullname" . 
}}-test + restartPolicy: Never +{{- end }} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.schema.json b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.schema.json new file mode 100644 index 0000000..47c2aa3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.schema.json @@ -0,0 +1,434 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "required": [ + "image" + ], + "definitions": { + "image": { + "type": "object", + "required": [ + "repository", + "tag" + ], + "properties": { + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + } + }, + "properties": { + "affinity": { + "type": "string" + }, + "args": { + "type": "array" + }, + "clusterDomain": { + "type": "string" + }, + "command": { + "type": "array" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "extraContainers": { + "type": "string" + }, + "extraEnv": { + "type": "string" + }, + "extraEnvFrom": { + "type": "string" + }, + "extraInitContainers": { + "type": "string" + }, + "extraPorts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "string" + }, + "extraVolumes": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "hostAliases": { + "type": "array" + }, + "image": { + "$ref": "#/definitions/image" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + 
"type": "string" + } + } + } + } + }, + "servicePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "items": { + "type": "string" + } + }, + "secretName": { + "type": "string" + } + } + } + } + } + }, + "lifecycleHooks": { + "type": "string" + }, + "livenessProbe": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "pgchecker": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/image" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "type": "object" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "podSecurityContext": { + "type": "object" + }, + "postgresql": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheusRule": { + "type": "object" + }, + "serviceMonitor": { + "type": "object" + }, + "extraServiceMonitor": { + "type": "object" + }, + "readinessProbe": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "restartPolicy": { + "type": "string" + }, + "route": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "path": { + "type": "string" + }, + "tls": { + "type": "object" + } + } + }, + "secrets": { + "type": 
"object" + }, + "securityContext": { + "type": "object" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "extraPorts": { + "type": "array" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "httpNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpPort": { + "type": "integer" + }, + "httpsNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpsPort": { + "type": "integer" + }, + "httpManagementNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpManagementPort": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "type": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "sessionAffinity": { + "type": "string" + }, + "sessionAffinityConfig": { + "type": "object" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": "boolean" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "labels": { + "type": "object" + }, + "name": { + "type": "string" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "rules": { + "type": "array" + } + } + }, + "startupScripts": { + "type": "object" + }, + "statefulsetAnnotations": { + "type": "object" + }, + "statefulsetLabels": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "metrics": { + "type": "array" + }, + "behavior": { + "type": "object" + } + } + }, + "test": { + "type": "object", + 
"properties": { + "enabled": { + "type": "boolean" + }, + "image": { + "$ref": "#/definitions/image" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array" + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.yaml new file mode 100644 index 0000000..a95521f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/04-keycloak/values.yaml @@ -0,0 +1,552 @@ +# Optionally override the fully qualified name +fullnameOverride: "imxc-keycloak" + +# Optionally override the name +nameOverride: "" + +# The number of replicas to create (has no effect if autoscaling enabled) +replicas: 2 + +image: + # The Keycloak image repository + #repository: cdm-dev.exem-oss.org/keycloak/keycloak + repository: 10.10.31.243:5000/cmoa3/keycloak + # Overrides the Keycloak image tag whose default is the chart version + tag: "11.0.1" + # The Keycloak image pull policy + pullPolicy: Always + +# Image pull secrets for the Pod +#imagePullSecrets: [] +# - name: myRegistrKeySecretName +imagePullSecrets: + - name: regcred + +# Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files +hostAliases: [] +# - ip: "1.2.3.4" +# hostnames: +# - "my.host.com" + +# Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links +enableServiceLinks: true + +# Pod management policy. One of `Parallel` or `OrderedReady` +podManagementPolicy: Parallel + +# Pod restart policy. One of `Always`, `OnFailure`, or `Never` +restartPolicy: Always + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + # Additional annotations for the ServiceAccount + annotations: {} + # Additional labels for the ServiceAccount + labels: {} + # Image pull secrets that are attached to the ServiceAccount + #imagePullSecrets: [] + imagePullSecrets: + - name: regcred + +rbac: + create: true + rules: + # RBAC rules for KUBE_PING + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + +# SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) +podSecurityContext: + fsGroup: 1000 + +# SecurityContext for the Keycloak container +securityContext: + runAsUser: 1000 + runAsNonRoot: true + +# Additional init containers, e. g. for providing custom themes +extraInitContainers: | + - name: theme-provider + image: 10.10.31.243:5000/cmoa3/theme-provider:latest + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme ..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +#extraInitContainers: "" + +# Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: "" + +# Lifecycle hooks for the Keycloak container +lifecycleHooks: | +# postStart: +# exec: +# command: +# - /bin/sh +# - -c +# - ls + +# Termination grace period in seconds for Keycloak shutdown. 
Clusters with a large cache might need to extend this to give Infinispan more time to rebalance +terminationGracePeriodSeconds: 60 + +# The internal Kubernetes cluster domain +clusterDomain: cluster.local + +## Overrides the default entrypoint of the Keycloak container +command: [] + +## Overrides the default args for the Keycloak container +#args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled", "-Dkeycloak.profile.feature.admin_fine_grained_authz=enabled"] +args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled"] + +# Additional environment variables for Keycloak +extraEnv: | + # HA settings + - name: PROXY_ADDRESS_FORWARDING + value: "true" + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + # postgresql settings + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: keycloak + - name: DB_USER + value: admin + - name: DB_PASSWORD + value: eorbahrhkswp +# - name: KEYCLOAK_USER +# value: keycloak +# - name: KEYCLOAK_PASSWORD +# value: keycloak +#extraEnv: "" + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS_COUNT + # value: "2" + # - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + # value: "2" +#extraEnv: | +# - name: JGROUPS_DISCOVERY_PROTOCOL +# value: dns.DNS_PING +# - name: JGROUPS_DISCOVERY_PROPERTIES +# value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' +# - name: CACHE_OWNERS_COUNT +# value: "2" +# - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT +# value: "2" +# Additional environment variables for Keycloak mapped from Secret or ConfigMap +extraEnvFrom: "" + +# Pod priority class name +#priorityClassName: "manual" + +# Pod affinity +affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 12 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + +#affinity: {} + +# Node labels for Pod assignment +nodeSelector: {} + +# Node taints to tolerate +tolerations: [] + +# Additional Pod labels +podLabels: {} + +# Additional Pod annotations +podAnnotations: {} + +# Liveness probe configuration +livenessProbe: | + httpGet: + path: /auth/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +# Readiness probe configuration +readinessProbe: | + httpGet: + path: /auth/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + +# Pod resource requests and limits +#resources: {} + # requests: + # cpu: "500m" + # memory: "1024Mi" + # limits: + # cpu: "500m" + # memory: "1024Mi" +resources: + requests: + memory: "200Mi" + cpu: "10m" + +# Startup scripts to run before Keycloak starts up +startupScripts: + # WildFly CLI script for configuring the node-identifier + keycloak.cli: | + {{- .Files.Get "scripts/keycloak.cli" }} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + +# Add additional volumes, e. g. 
for custom themes +extraVolumes: | + - name: theme + emptyDir: {} +#extraVolumes: "" + +# Add additional volumes mounts, e. g. for custom themes +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes +#extraVolumeMounts: "" + +# Add additional ports, e. g. for admin console or exposing JGroups ports +extraPorts: [] + +# Pod disruption budget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +# Annotations for the StatefulSet +statefulsetAnnotations: {} + +# Additional labels for the StatefulSet +statefulsetLabels: {} + +# Configuration for secrets that should be created +secrets: {} + # mysecret: + # type: {} + # annotations: {} + # labels: {} + # stringData: {} + # data: {} + +service: + # Annotations for headless and HTTP Services + annotations: {} + # Additional labels for headless and HTTP Services + labels: {} + # key: value + # The Service type + type: NodePort + # Optional IP for the load balancer. Used for services of type LoadBalancer only + loadBalancerIP: "" + # The http Service port + httpPort: 80 + # The HTTP Service node port if type is NodePort + httpNodePort: 31082 + # The HTTPS Service port + httpsPort: 8443 + # The HTTPS Service node port if type is NodePort + httpsNodePort: null + # The WildFly management Service port + httpManagementPort: 9990 + # The WildFly management Service node port if type is NodePort + httpManagementNodePort: 31990 + # Additional Service ports, e. g. for custom admin console + extraPorts: [] + # When using Service type LoadBalancer, you can restrict source ranges allowed + # to connect to the LoadBalancer, e. g. 
will result in Security Groups + # (or equivalent) with inbound source ranges allowed to connect + loadBalancerSourceRanges: [] + # Session affinity + # See https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace + sessionAffinity: "" + # Session affinity config + sessionAffinityConfig: {} + +ingress: + # If `true`, an Ingress is created + enabled: false + # The Service port targeted by the Ingress + servicePort: http + # Ingress annotations + annotations: {} + ## Resolve HTTP 502 error using ingress-nginx: + ## See https://www.ibm.com/support/pages/502-error-ingress-keycloak-response + # nginx.ingress.kubernetes.io/proxy-buffer-size: 128k + + # Additional Ingress labels + labels: {} + # List of rules for the Ingress + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - / + # TLS configuration + tls: + - hosts: + - keycloak.example.com + secretName: "" + + # ingress for console only (/auth/admin) + console: + # If `true`, an Ingress is created for console path only + enabled: false + # Ingress annotations for console ingress only + # Useful to set nginx.ingress.kubernetes.io/whitelist-source-range particularly + annotations: {} + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - /auth/admin/ + +## Network policy configuration +networkPolicy: + # If true, the Network policies are deployed + enabled: false + + # Additional Network policy labels + labels: {} + + # Define all other external allowed source + # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#networkpolicypeer-v1-networking-k8s-io + extraFrom: [] + +route: + # If `true`, an OpenShift Route is created + enabled: false + # Path for the Route + path: / + # Route annotations + annotations: {} + # Additional Route labels + labels: {} + # Host name for the Route + host: "" + # TLS configuration + tls: + # If `true`, TLS is enabled 
for the Route + enabled: false + # Insecure edge termination policy of the Route. Can be `None`, `Redirect`, or `Allow` + insecureEdgeTerminationPolicy: Redirect + # TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` + termination: edge + +pgchecker: + image: + # Docker image used to check Postgresql readiness at startup + #repository: cdm-dev.exem-oss.org/keycloak/busybox + #repository: {{ .Values.global.IMXC_REGISTRY }}/keycloak/busybox + repository: 10.10.31.243:5000/cmoa3/busybox + # Image tag for the pgchecker image + tag: 1.32 + # Image pull policy for the pgchecker image + pullPolicy: Always + # SecurityContext for the pgchecker contai/docker.ner + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + # Resource requests and limits for the pgchecker container + resources: + requests: + cpu: "10m" + memory: "16Mi" + limits: + cpu: "10m" + memory: "16Mi" + +postgresql: + # If `true`, the Postgresql dependency is enabled + enabled: false + # PostgreSQL User to create + postgresqlUsername: keycloak + # PostgreSQL Password for the new user + postgresqlPassword: keycloak + # PostgreSQL Database to create + postgresqlDatabase: keycloak + # PostgreSQL network policy configuration + networkPolicy: + enabled: false + +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /metrics + # The Service port at which metrics are served + port: http-management + +extraServiceMonitor: + # If 
`true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /auth/realms/master/metrics + # The Service port at which metrics are served + port: http + +prometheusRule: + # If `true`, a PrometheusRule resource for the prometheus-operator is created + enabled: false + # Annotations for the PrometheusRule + annotations: {} + # Additional labels for the PrometheusRule + labels: {} + # List of rules for Prometheus + rules: [] + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
+ # expr: | + # ( + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m] + # ) + # ) + # / + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m] + # ) + # ) + # ) * 100 > 1 + # for: 5m + # labels: + # severity: warning + +autoscaling: + # If `true`, a autoscaling/v2beta2 HorizontalPodAutoscaler resource is created (requires Kubernetes 1.18 or above) + # Autoscaling seems to be most reliable when using KUBE_PING service discovery (see README for details) + # This disables the `replicas` field in the StatefulSet + enabled: false + # Additional HorizontalPodAutoscaler labels + labels: {} + # The minimum and maximum number of replicas for the Keycloak StatefulSet + minReplicas: 3 + maxReplicas: 10 + # The metrics to use for scaling + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + # The scaling policy to use. This will scale up quickly but only scale down a single Pod per 5 minutes. + # This is important because caches are usually only replicated to 2 Pods and if one of those Pods is terminated this will give the cluster time to recover. 
+ behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 300 + +test: + # If `true`, test resources are created + enabled: false + image: + # The image for the test Pod + #repository: docker.io/unguiculus/docker-python3-phantomjs-selenium + repository: 10.10.31.243:5000/docker-python3-phantomjs-selenium + # The tag for the test Pod image + tag: v1 + # The image pull policy for the test Pod image + pullPolicy: IfNotPresent + # SecurityContext for the entire test Pod + podSecurityContext: + fsGroup: 1000 + # SecurityContext for the test container + securityContext: + runAsUser: 1000 + runAsNonRoot: true + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh new file mode 100644 index 0000000..78a9962 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh @@ -0,0 +1,17 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + /sbin/tini -- java -Djava.security.egd=file:/dev/./urandom -jar /app.jar + #java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init.json b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + 
"ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": 
"2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + 
"webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + 
"directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + 
"nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + 
"email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": 
"openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + 
"protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + 
"attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": 
"${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", 
+ "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + 
"claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + 
"name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": 
"birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + 
"contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": 
"dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": 
"direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml new file mode 100644 index 0000000..fb8fe7b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-server + namespace: imxc +spec: + selector: + matchLabels: + app: auth + replicas: 1 + template: + metadata: + labels: + app: auth + spec: + initContainers: + - name: init-resource + image: {{ .Values.global.IMXC_IN_REGISTRY }}/init-resource:latest + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ['chmod -R 777 /scripts; cp /scripts/init.json /tmp/init.json'] + volumeMounts: + - name: init + mountPath: /tmp + containers: + - name: auth-server + image: {{ .Values.global.IMXC_IN_REGISTRY 
}}/auth-server:{{ .Values.global.AUTH_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-auth-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # imxc-api-server configuration + - name: IMXC_API-SERVER-URL + value: http://imxc-api-service:8080 + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_REPO + value: debug + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_AUTH_AUTHENTICATION_USER_SERVICE + value: debug + # 현대카드는 커스텀으로 해당 값 추가. keycloak만 사용(true), keycloak+내부db 사용(false) + - name: IMXC_KEYCLOAK_ENABLED + value: "true" + + volumeMounts: + - name: init + mountPath: /tmp + resources: + requests: + memory: "200Mi" + cpu: "10m" + + volumes: + - name: init + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: auth-server-service + namespace: imxc +spec: + type: ClusterIP + selector: + app: auth + ports: + - protocol: TCP + port: 8480 + # nodePort: 15016 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml new file mode 100644 index 0000000..cbbee9a --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + selector: + matchLabels: + app: datagate + replicas: 2 + template: + metadata: + labels: + app: datagate + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/datagate:{{ .Values.global.DATAGATE_VERSION }} + 
imagePullPolicy: IfNotPresent + name: datagate + ports: + - containerPort: 50051 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! + - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: kafka-broker:9094 + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "2000m" + memory: "1Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + ports: + - name: datagate-grpc + port: 50051 + protocol: TCP + targetPort: 50051 + nodePort: 30051 + - name: datagate-http + port: 14268 + targetPort: 14268 +# nodePort: 31268 + - name: datagate-readiness + port: 14269 + targetPort: 14269 + selector: + app: datagate + type: NodePort diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml new file mode 100644 index 0000000..45c3d41 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml @@ -0,0 +1,331 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + selector: + matchLabels: + app: metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: metric-agent + spec: + containers: + - name: metric-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-agent:{{ .Values.global.METRIC_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14271 + - containerPort: 14272 + args: + - --config.file=/etc/metric-agent/metric-agent.yml + env: + - name: STORAGE_TYPE + 
value: datagate + - name: DATAGATE + value: datagate:50051 + - name: CLUSTER_ID + value: cloudmoa +# - name: USER_ID +# value: mskim@ex-em.com + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "300m" + volumes: + - name: config-volume + configMap: + name: metric-agent-config + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + ports: + - name: metric + port: 14271 + targetPort: 14271 + selector: + app: metric-agent + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metric-agent-config + namespace: imxc +data: + metric-agent.yml: | + global: + scrape_interval: 10s + evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. + + scrape_configs: + - job_name: 'kubernetes-kubelet' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 'cloudmoa' + - target_label: xm_entity_type + replacement: 'Node' + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + + - job_name: 'kubernetes-node-exporter' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + 
kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: 'kubernetes-(.*)' + replacement: '${1}' + target_label: name + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Node' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: 'kubernetes-cadvisor' + scheme: https + + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Container' + +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + + {{- else }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +{{- end }} + # CLOUD-8671 | 데이터 필터링 설정 추가 + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop + + - job_name: 'kafka-consumer' + metrics_path: /remote_prom + scrape_interval: 5s + scrape_timeout: 5s + scheme: kafka + static_configs: + - targets: ['kafka-broker:9094'] + params: + #server_addrs: ['broker.default.svc.k8s:9094'] + server_addrs: ['kafka-broker:9094'] + encoding: [proto3] + contents: [remote_write] + compression: [snappy] + group: [remote-write-consumer] + workers: [50] + + # job for API server (SpringBoot) commented by ersione 2019-09-19 + - job_name: 'imxc-api' + metrics_path: '/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: ['imxc-api-service:8080'] + - job_name: 'imxc-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] + #- job_name: 'imxc-auth' + # metrics_path: '/actuator/prometheus' + # scrape_interval: 15s + # static_configs: + # - targets: ['auth-server-service:8480'] + + + + - job_name: 'alertmanager-exporter' + metrics_path: '/metrics' + scrape_interval: 5s + static_configs: + - targets: ['alertmanager:9093'] + + + # modified by seungtak choi 2020-02-18 + - job_name: 'cmoa-collector' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + 
namespaces: + names: + - imxc + relabel_configs: + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: cmoa-collector + + # added by dwkim 2021-03-15 + - job_name: 'elasticsearch' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + relabel_configs: + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_pod_node_name] + target_label: xm_node_id + - source_labels: [__meta_kubernetes_namespace] + target_label: xm_namespace + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: es-exporter-elasticsearch-exporter + + # kafka-exporter prometheus 수집 룰 추가 + - job_name: 'kafka-exporter' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9308' + + # kafka-jmx-exporter configuration yaml 수집룰 추가 + - job_name: 'kafka-jmx' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9010' + + # job for API Server(Spring Cloud Notification Server) commented by hjyoon 2022-01-26 + - job_name: 'cmoa-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml new file mode 100644 index 0000000..3d7acc8 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + selector: + matchLabels: + app: metric-collector + replicas: 3 + template: + metadata: + labels: + app: metric-collector + spec: + containers: + - name: metric-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-collector:{{ .Values.global.METRIC_COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14270 + env: + - name: KAFKA_CONSUMER_BROKERS + value: kafka-broker:9094 + - name: HTTP_PUSH + value: http://base-cortex-nginx/api/v1/push + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + ports: + - name: metric + port: 14270 + targetPort: 14270 + selector: + app: metric-collector diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml new file mode 100644 index 0000000..b20fed2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-batch + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-batch +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-batch + template: + metadata: + labels: + app: cmoa-kube-info-batch + spec: + containers: + - name: cmoa-kube-info-batch + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-batch:{{ .Values.global.KUBE_INFO_BATCH_VERSION }} + imagePullPolicy: Always + env: + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + 
value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: DELETE_HOUR + value: '{{ .Values.global.DELETE_HOUR }}' diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml new file mode 100644 index 0000000..cad91b9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-connector + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-connector +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-connector + template: + metadata: + labels: + app: cmoa-kube-info-connector + spec: + containers: + - name: cmoa-kube-info-connector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-connector:{{ .Values.global.KUBE_INFO_CONNECTOR_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_GROUP_ID + value: cmoa-kube-info-connector + - name: KAFKA_SERVER + value: kafka:9092 + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: MAX_POLL_RECORDS_CONFIG + value: "300" + - name: MAX_POLL_INTERVAL_MS_CONFIG + value: "600000" + - name: SESSION_TIMEOUT_MS_CONFIG + value: "60000" + - name: MAX_PARTITION_FETCH_BYTES_CONFIG + value: "5242880" 
diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml new file mode 100644 index 0000000..6f77ee5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-flat + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-flat +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-flat + template: + metadata: + labels: + app: cmoa-kube-info-flat + spec: + containers: + - name: cmoa-kube-info-flat + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-flat:{{ .Values.global.KUBE_INFO_FLAT_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_SERVER + value: kafka:9092 + - name: KAFKA_INPUT_TOPIC + value: {{ .Values.global.KAFKA_INPUT_TOPIC }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + resources: + limits: + memory: 1Gi + requests: + memory: 200Mi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: 
+ - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml new file mode 100644 index 0000000..5ffd9c2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Service +metadata: + name: eureka + namespace: imxc + labels: + app: eureka +spec: + type: NodePort + ports: + - port: 8761 + targetPort: 8761 + nodePort: 30030 + name: eureka + selector: + app: eureka +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: eureka + namespace: imxc +spec: + serviceName: 'eureka' + replicas: 3 + selector: + matchLabels: + app: eureka + template: + metadata: + labels: + app: eureka + spec: + containers: + - name: eureka + image: {{ .Values.global.IMXC_IN_REGISTRY }}/eureka-server:{{ .Values.global.EUREKA_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8761 + #resources: + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "1200Mi" + # cpu: "500m" + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/ + - name: JVM_OPTS + value: "-Xms1g -Xmx1g" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "20m" diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml new file mode 100644 index 0000000..de967a6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml @@ -0,0 +1,245 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: 
imxc-api-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-api + ports: + - protocol: TCP + name: api + port: 8080 + targetPort: 8080 + nodePort: 32080 + - protocol: TCP + name: netty + port: 10100 + targetPort: 10100 + nodePort: 31100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-api + namespace: imxc + labels: + app: imxc-api +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-api + template: + metadata: + labels: + app: imxc-api + build: develop + spec: + securityContext: + #runAsNonRoot: true + runAsUser: 1577 + initContainers: + - name: cloudmoa-api-permission-fix + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 +# - sh +# - -c +# - "chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log" + volumeMounts: + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + containers: + - name: imxc-api + image: {{ .Values.global.IMXC_IN_REGISTRY }}/api-server:{{ .Values.global.API_SERVER_VERSION }} + resources: + requests: + cpu: 200m + memory: 500Mi + limits: + cpu: 2000m + memory: 5000Mi + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-api-server.sh" | quote }}] + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + - name: SPRING_DATAGATE_URLS + value: "{{ .Values.global.DATAGATE_INSIDE_IP }}" + - name: SPRING_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_INSIDE_PORT }}" + - name: SPRING_REDIS_URLS + value: {{ .Values.global.REDIS_URLS }} + - name: SPRING_REDIS_PORT + value: "{{ .Values.global.REDIS_PORT }}" + - name: SPRING_REDIS_PASSWORD + value: {{ .Values.global.REDIS_PASSWORD }} + - name: SPRING_DATASOURCE_URL + value: 
jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + - name: SPRING_BOOT_ADMIN_CLIENT_URL + value: http://{{ .Values.global.IMXC_ADMIN_SERVER_DNS }}:8888 + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_NAME + value: Intermax Cloud API Server + - name: SPRING_BOOT_ADMIN_CLIENT_ENABLED + value: "false" + - name: OPENTRACING_JAEGER_ENABLED + value: "false" + - name: SPRING_JPA_PROPERTIES_HIBERNATE_GENERATE_STATISTICS + value: "false" + - name: IMXC_REPORT_ENABLED + value: "true" + - name: IMXC_ALERT_PERSIST + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_ENVIRONMENT + value: Demo + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_PREFERIP + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_PODNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: SPRING_BOOT_ADMIN_CLIENT_AUTODEREGISTRATION + value: "true" + - name: SPRING_JPA_HIBERNATE_DDL-AUTO + value: validate + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + - name: KEYCLOAK_RESOURCE + value: "{{ .Values.global.KEYCLOAK_RESOURCE }}" + - name: SPRING_KEYCLOAK_MASTER_USERNAME + value: "{{ .Values.global.KEYCLOAK_MASTER_USERNAME }}" + - name: SPRING_KEYCLOAK_MASTER_PASSWORD + value: "{{ .Values.global.KEYCLOAK_MASTER_PASSWORD }}" + - name: SPRING_LDAP_USE + value: "{{ .Values.global.IMXC_LDAP_USE }}" + - name: TIMEZONE + value: Asia/Seoul + - name: IMXC_PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: IMXC_PROMETHEUS_NAMESPACE + value: "imxc" + - name: LOGGING_LEVEL_ROOT + value: info + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: 
IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + #R30020210730 추가 :: 현대카드는 true로 설정 + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-HOST + value: "exemmail1.ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PORT + value: "587" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-USERNAME + value: "imxc@ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PASSWORD + value: "1234" + - name: IMXC_ALERT_NOTIFICATION_MAIL_PROTOCOL + value: "smtp" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-REQ + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-ENB + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_SMTP-AUTH + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_DEBUG + value: "true" + - name: IMXC_ANOMALY_BLACK-LIST + value: "false" + - name: IMXC_VERSION_SAAS + value: "false" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_API_SERVER_KUBERNETES_SERVICE + value: info + - name: IMXC_WEBSOCKET_SCHEDULE_PERIOD_5SECOND + value: "30000" + - name: IMXC_CACHE_INFO_1MCACHE + value: "0 0/1 * * * ?" + - name: IMXC_EXECUTION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_PERMISSION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_CODE-LOG_USE + value: "false" + - name: IMXC_PORTAL_INFO_URL + value: "{{ .Values.global.IMXC_PORTAL_INFO_URL }}" + # Do not remove below rows related to AGENT-INSTALL. Added by youngmin 2021-03-29. 
+ - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_IP + value: {{ .Values.global.KAFKA_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_INTERFACE-PORT + value: "{{ .Values.global.KAFKA_INTERFACE_PORT }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_IP + value: {{ .Values.global.IMXC_API_SERVER_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_NETTY-PORT + value: "{{ .Values.global.APISERVER_NETTY_PORT }}" + - name: AGENT-INSTALL_REGISTRY_URL + value: {{ .Values.global.IMXC_IN_REGISTRY }} + - name: AGENT-INSTALL_IMAGE_TAG + value: {{ .Values.global.AGENT_IMAGE_TAG }} + - name: AGENT-INSTALL_JAEGER_AGENT_CLUSTERIP + value: {{ .Values.global.JAEGER_AGENT_CLUSTERIP }} + - name: AGENT-INSTALL_JAEGER_JAVA-SPECIALAGENT-CLASSPATH + value: {{ .Values.global.JAEGER_JAVA_SPECIALAGENT_CLASSPATH }} + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_IP + value: "{{ .Values.global.DATAGATE_OUTSIDE_IP }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_OUTSIDE_PORT }}" + - name: IMXC_REST-CONFIG_MAX-CON + value: "200" + - name: IMXC_REST-CONFIG_MAX-CON-ROUTE + value: "65" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + # Elasticsearch for Security + - name: SPRING_ELASTIC_SSL_USERNAME + value: "{{ .Values.global.CMOA_ES_ID }}" + - name: SPRING_ELASTIC_SSL_PASSWORD + value: "{{ .Values.global.CMOA_ES_PW }}" + - name: IMXC_BACK-LOGIN_ENABLED + value: "{{ .Values.global.BACKLOGIN }}" + volumeMounts: + - mountPath: /var/log/imxc-audit.log + name: auditlog + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + volumes: + - name: auditlog + hostPath: + path: {{ .Values.global.AUDITLOG_PATH }}/imxc-audit.log + type: FileOrCreate + - name: notification-upper-directory + hostPath: + path: /home/ + type: DirectoryOrCreate + - name: notification-directory + hostPath: + path: 
/home/cloudmoa_event.log + type: FileOrCreate diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml new file mode 100644 index 0000000..e125243 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-collector + template: + metadata: + labels: + app: cmoa-collector + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: cmoa-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cmoa-collector:{{ .Values.global.COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 500m + memory: 2500Mi + ports: + - containerPort: 12010 + env: + - name: LOCATION + value: Asia/Seoul + - name: KAFKA_SERVER + value: kafka:9092 + - name: ELASTICSEARCH + value: elasticsearch:9200 +# - name: PROMETHEUS +# value: nginx-cortex/prometheus + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! 
+ - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: CMOA_ES_ID + value: {{ .Values.global.CMOA_ES_ID }} + - name: CMOA_ES_PW + value: {{ .Values.global.CMOA_ES_PW }} + resources: + requests: + cpu: "300m" + memory: "1500Mi" + limits: + cpu: "500m" + memory: "2500Mi" +- apiVersion: v1 + kind: Service + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + ports: + - name: cmoa-collector-exporter + port: 12010 + targetPort: 12010 + selector: + app: cmoa-collector + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml new file mode 100644 index 0000000..99c7a5b --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: noti-server + namespace: imxc +spec: + selector: + matchLabels: + app: noti + replicas: 1 + template: + metadata: + labels: + app: noti + spec: + containers: + - name: noti-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/notification-server:{{ .Values.global.NOTI_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-noti-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: {{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }} + - name: KEYCLOAK_REALM + value: exem + + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + + # postgres configuration + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + + # redis configuration + - name: SPRING_REDIS_HOST + value: redis-master + - name: SPRING_REDIS_PORT + value: "6379" + - name: SPRING_REDIS_PASSWORD + value: dkagh1234! 
+ + # elasticsearch configuration + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + + # file I/O configuration + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + + # rabbitmq configuration + - name: IMXC_RABBITMQ_HOST + value: base-rabbitmq + - name: IMXC_RABBITMQ_PORT + value: "61613" + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: IMXC_RABBITMQ_SYSTEM_ID + value: "user" + - name: IMXC_RABBITMQ_SYSTEM_PASSWORD + value: "eorbahrhkswp" + + # api-server configuration + - name: IMXC_API-SERVER-URL + value: "http://imxc-api-service:8080" + + # cortex integration + - name: SPRING_CORTEX_URLS + value: base-cortex-configs + - name: SPRING_CORTEX_PORT + value: "8080" + + # alert webhook + - name: IMXC_ALERT_WEBHOOK_URLS + value: http://noti-server-service:8080/alert + + # etc configuration + - name: IMXC_PROMETHEUS_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + - name: IMXC_ALERT_KUBERNETES_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: noti-server-service + namespace: imxc +spec: + type: NodePort + selector: + app: noti + ports: + - protocol: TCP + port: 8080 + nodePort: 31083 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml new file mode 100644 index 
0000000..b3223e5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-stream-txntrend-deployment + namespace: imxc + labels: + app: kafka-stream-txntrend +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-stream-txntrend + template: + metadata: + labels: + app: kafka-stream-txntrend + spec: + containers: + - name: kafka-stream-txntrend + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-stream-txntrend:{{ .Values.global.KAFKA_STREAM_VERSION }} + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_KAFKA_HOST + value: kafka-broker:9094 + - name: SERVICE_STREAM_OUTPUT + value: jspd_txntrend diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml new file mode 100644 index 0000000..80476a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml @@ -0,0 +1,107 @@ +{{ if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{ end }} +kind: ClusterRoleBinding +metadata: + name: topology-agent + namespace: imxc + labels: + k8s-app: topology-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: topology-agent + namespace: imxc +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: topology-agent + namespace: imxc + labels: + app: topology-agent +spec: + selector: + matchLabels: + app: topology-agent + template: + metadata: + labels: + app: topology-agent + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # below appended + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: 
+ - name: topology-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/topology-agent:{{ .Values.global.TOPOLOGY_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + env: + - name: CLUSTER_ID + value: cloudmoa + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATAGATE + value: datagate:50051 + - name: LOG_RNAME_USE + value: "false" + - name: LOG_LEVEL + value: "DEBUG" + - name: CLOUDMOA_SETTING_PATH + value: /home/cloudmoa/setting/ + resources: + requests: + memory: "125Mi" + cpu: "100m" + limits: + memory: "600Mi" + cpu: "500m" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml new file mode 100644 index 0000000..79969d7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-deployment + namespace: imxc + labels: + app: cloud +spec: + selector: + matchLabels: + app: cloud + replicas: 1 + template: + metadata: + labels: + app: cloud + spec: + containers: + - env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ZIPKIN_BASE-URL + value: http://zipkin-service:9411 + - name: 
LOGGING_LEVEL_COM_EXEM_CLOUD_ZUULSERVER_FILTERS_AUTHFILTER + value: info + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + name: zuul + image: {{ .Values.global.IMXC_IN_REGISTRY }}/zuul-server:{{ .Values.global.ZUUL_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + #- containerPort: 6831 + #protocol: UDP + #resources: + # requests: + # memory: "256Mi" + # cpu: "344m" + # limits: + # memory: "1Gi" + # cpu: "700m" + resources: + requests: + memory: "200Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: zuul + namespace: imxc + labels: + app: cloud +spec: + type: NodePort + selector: + app: cloud + ports: + - port: 8080 + targetPort: 8080 + nodePort: 31081 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/values.yaml new file mode 100644 index 0000000..cdb0390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/05-imxc/values.yaml @@ -0,0 +1,157 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + IMXC_LDAP_USE: false + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AUDITLOG_PATH: /var/log + KAFKA_IP: kafka-broker + # 로드밸런서 안 쓴다고 가정했을때 입니다.. 
+ KAFKA_INTERFACE_PORT: 9094 + APISERVER_NETTY_PORT: 10100 + #REGISTRY_URL: cdm-dev.exem-oss.org:5050 + #REGISTRY_URL: 10.10.31.243:5000/cmoa + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AGENT_IMAGE_TAG: rel3.4.8 + # Jaeger 관련변수 + JAEGER_AGENT_CLUSTERIP: 10.98.94.198 + JAEGER_JAVA_SPECIALAGENT_CLASSPATH: classpath:/install/opentracing-specialagent-1.7.4.jar + # added by DongWoo Kim 2021-06-21 + KEYCLOAK_AUTH_SERVER_URL: http://10.10.43.227:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_MASTER_USERNAME: admin + KEYCLOAK_MASTER_PASSWORD: admin + IMXC_PORTAL_INFO_URL: + KEYCLOAK_REALM: exem + # added by EunHye Kim 2021-08-25 + #DATAGATE_URLS: datagate + #DATAGATE_IP: 10.10.43.227 + #DATAGATE_PORT: 14268 + DATAGATE_INSIDE_IP: datagate + DATAGATE_INSIDE_PORT: 14268 + DATAGATE_OUTSIDE_IP: 10.10.43.227 + DATAGATE_OUTSIDE_PORT: 30051 + REDIS_URLS: redis-master + REDIS_PORT: 6379 + REDIS_PASSWORD: dkagh1234! + # added by DongWoo Kim 2021-08-31 (version of each module) + DATAGATE_VERSION: rel3.4.8 + #ADMIN_SERVER_VERSION: v1.0.0 + #API_SERVER_VERSION: CLOUD-172 + API_SERVER_VERSION: rel3.4.8 + COLLECTOR_VERSION: rel3.4.8 + #release-3.3.0 + TOPOLOGY_AGENT_VERSION: rel3.4.8 + METRIC_COLLECTOR_VERSION: rel3.4.8 + #v1.0.0 + METRIC_AGENT_VERSION: rel3.4.8 + # spring cloud + ZUUL_SERVER_VERSION: rel3.4.8 + #CMOA-1269 + EUREKA_SERVER_VERSION: rel3.4.8 + AUTH_SERVER_VERSION: rel3.4.8 + NOTI_SERVER_VERSION: rel3.4.8 + KAFKA_STREAM_VERSION: rel3.4.8 + CMOA_MANUAL_VERSION: rel3.4.8 + KUBE_INFO_FLAT_VERSION: rel3.4.8 + KUBE_INFO_BATCH_VERSION: rel3.4.8 + KUBE_INFO_CONNECTOR_VERSION: rel3.4.8 + + + CMOA_MANUAL_PORT: 31090 + + + # Keycloak + #KEYCLOAK_VERSION: v1.0.0 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + #IMXC_REGISTRY: 10.10.31.243:5000 + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + + # namespace 추가 + IMXC_NAMESPACE: imxc + + # ZUUL 8080으로 열어놓을것 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + JDBC_KIND: 'postgres' + JDBC_SERVER: 'postgres:5432' + 
JDBC_DB: 'postgresdb' + JDBC_USER: 'admin' + JDBC_PWD: 'eorbahrhkswp' + + KAFKA_INPUT_TOPIC: 'kubernetes_info' + + TABLE_PREFIX: 'cmoa_' + BLACK_LIST: 'configmap_base,cronjob_active,endpoint_base,endpoint_addresses,endpoint_notreadyaddresses,endpoint_ports,event_base,node_image,persistentvolume_base,persistentvolumeclaim_base,pod_volume,resourcequota_base,resourcequota_scopeselector' + DELETE_HOUR: '15' + BACKLOGIN: false diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh 
b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + 
"accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", 
+ "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": 
"manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + 
"clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": 
"8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + 
"otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + 
"standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + 
"surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + 
"authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + 
"defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": 
"fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + 
"id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add 
allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + 
"display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": 
"lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": 
"String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + 
{ + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + 
"xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + 
"saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + 
"requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + 
"autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml new file mode 100644 index 0000000..9fa97ed --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config-jaeger + namespace: imxc +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + // Env Settings servletURL + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ 
.Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + demoServletURL: "{{ .Values.global.DEMO_SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings interMaxURL + interMaxURL: "http://{{ .Values.global.INTERMAX_IP }}:8080/intermax/?", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_UI_VERSION }}', + UI_build_ver: '{{ .Values.global.UI_SERVER_VERSION }}', + maxSelectionSize: 30, + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + healthIndicatorStateInfo: [ + { + state: "critical", + // max: 1.0, + // over: 0.8, + max: 100, + over: 80, + text: "Critical", + color: "#ff4040", + level: 4, + }, { + state: "warning", + // max: 0.8, + // over: 0.5, + max: 80, + over: 50, + text: "Warning", + color: "#ffa733", + level: 3, + }, { + state: "attention", + // max: 0.5, + // over: 0.0, + max: 50, + over: 0, + text: "Attention", + // color: "#B4B83D", + color: "#1cbe85", + level: 2, + }, { + state: "normal", + max: 0, + over: 0, + text: "Normal", + // color: "#64B87D", + color: "#24b0ed", + level: 1, + }, + ] + }; + + diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml new file mode 100644 index 0000000..a0d959f --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service-jaeger + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui-jaeger + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31084 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui-jaeger + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui-jaeger + template: + metadata: + labels: + app: imxc-ui-jaeger + spec: + containers: + - name: imxc-ui-jaeger + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config-jaeger + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config-jaeger diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml new file mode 100644 index 0000000..54b3bcb --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://10.10.43.227:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 10.10.43.227 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 10.10.43.227 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 10.10.43.227 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel3.4.8 + UI_SERVER_VERSION: rel3.4.8 + CMOA_MANUAL_VERSION: rel3.4.8 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! 
/bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' 
http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + 
"resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + 
"containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, 
+ { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + 
"webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": 
"**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": 
"authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", 
+ "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": 
"openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = 
Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile 
- JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + 
"jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + 
"claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": 
"openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": 
"openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + 
"protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + 
"saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + 
"allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + 
"priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + 
"description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": 
"basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + 
"requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": 
"6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": 
"registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { 
+ "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml 
b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml new file mode 100644 index 0000000..e47ff66 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config + namespace: imxc + +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + // Env Settings servletURL + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + // Env Settings interMaxURL + // ex) ~/intermax/?paConnect=1&paType=ResponseInspector&fromTime=1556096539206&toTime=1556096599206&serverName=jeus89 + interMaxURL: "", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_VERSION }}', + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + // refreshTime: '4', // 리로드 주기 설정 4로 설정시 새벽 4시에 리로드 하게 됨 + intervalTime: { // 5의 배수여야만 함 + short: 5, + medium: 10, + long: 60, + }, + // excludedContents: { + // anomalyScoreSettings: true, // entity black list setting page + // anomalyScoreInSidebar: true, // anomaly score in side bar + // }, + serviceTraceAgentType: 'jspd' + }; diff --git 
a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml new file mode 100644 index 0000000..35c4b61 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui + template: + metadata: + labels: + app: imxc-ui + spec: + containers: + - name: imxc-ui + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config diff --git a/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml new file mode 100644 index 0000000..54b3bcb --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://10.10.43.227:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 10.10.43.227 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 10.10.43.227 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 10.10.43.227 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel3.4.8 + UI_SERVER_VERSION: rel3.4.8 + CMOA_MANUAL_VERSION: rel3.4.8 diff --git a/ansible/01_old/roles/cmoa_demo_install/files/ip_change b/ansible/01_old/roles/cmoa_demo_install/files/ip_change new file mode 100755 index 0000000..ac13cc7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/ip_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_ip=$1 +after_ip=$2 +grep_path=$3 + +if [[ $before_ip == '' || $after_ip == '' ]]; then + echo '[Usage] $0 {before_ip} {after_ip}' + exit +fi + +grep -rn ${before_ip} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_ip}/${after_ip}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/k8s_status b/ansible/01_old/roles/cmoa_demo_install/files/k8s_status new file mode 100755 index 0000000..16b3c61 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/k8s_status @@ -0,0 +1,86 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, subprocess, io, time +from kubernetes import client, config +def debug_print(msg): + print(" # ", msg) + +def k8s_conn(KUBE_CONFIG_PATH): + config.load_kube_config( + config_file=KUBE_CONFIG_PATH + ) + k8s_api = client.CoreV1Api() + + return k8s_api + +def k8s_get_pod(k8s_api, namespace, target=''): + pretty=False + watch=False + timeout_seconds=30 + api_response = k8s_api.list_namespaced_pod(namespace, pretty=pretty, timeout_seconds=timeout_seconds, watch=watch) + pod_list=[] + for pod in api_response.items: + status = pod.status.phase + #container_status = pod.status.container_statuses[0] + #if container_status.started is False or container_status.ready is False: + # waiting_state = container_status.state.waiting + # if waiting_state.message is not None and 'Error' in waiting_state.message: + # status = waiting_state.reason + if target != '': + if target in pod.metadata.name: + return (pod.metadata.name + " " + status) + pod_list.append(pod.metadata.name+" "+status) + return pod_list + +def k8s_pod_status_check(k8s_api, waiting_time, namespace,except_pod=False): + num=0 + while True: + num+=1 + resp=k8s_get_pod(k8s_api, namespace) + all_run_flag=True + if debug_mode: + debug_print('-'*30) + debug_print('pod 상태 체크시도 : {} ({}s)'.format(num, waiting_time)) + debug_print('-'*30) + for i in resp: + if except_pod: + if except_pod in i.lower(): continue + if 'pending' in i.lower(): + all_run_flag=False + result='{} 결과: {}'.format(i, all_run_flag) + debug_print(result) + if all_run_flag: + if debug_mode: + debug_print('-'*30) + debug_print('[{}] pod All Running'.format(namespace)) + debug_print('-'*30) + for i in resp: debug_print(i) + break + else: time.sleep(int(waiting_time)) + +def main(): + namespace = os.sys.argv[1] + + try: + Except_k8s_pod = os.sys.argv[2] + except: + Except_k8s_pod = '' + + try: + KUBE_CONFIG_PATH = os.sys.argv[3] + os.environ["KUBECONFIG"]=KUBE_CONFIG_PATH + except: + KUBE_CONFIG_PATH = 
os.environ["KUBECONFIG"] + + k8s_api=k8s_conn(KUBE_CONFIG_PATH) + k8s_pod_status_check(k8s_api, 60, namespace, Except_k8s_pod) + + +if __name__ == "__main__": + try: + debug_mode=False + main() + except Exception as err: + print("[Usage] k8s_status {namespace} {Except_pod=(default=false)} {KUBECONFIG_PATH=(default=current env)}") + print(err) diff --git a/ansible/01_old/roles/cmoa_demo_install/files/postgres_check_data b/ansible/01_old/roles/cmoa_demo_install/files/postgres_check_data new file mode 100755 index 0000000..d377aeb --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/postgres_check_data @@ -0,0 +1,6 @@ +#!/bin/bash + +namespace=$1 +pg_pod=`kubectl -n ${namespace} get pod --no-headers | awk '{print $1}' | grep postgres` +kubectl_cmd="kubectl -n ${namespace} exec -it ${pg_pod} --" +${kubectl_cmd} bash -c "echo \"select count(*) from pg_database where datname='keycloak';\" | /usr/bin/psql -U postgres | egrep -iv '(count|---|row)' | tr -d ' ' | tr -d '\n'" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/files/rel_change b/ansible/01_old/roles/cmoa_demo_install/files/rel_change new file mode 100755 index 0000000..ae1f6b3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/files/rel_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_version=$1 +after_version=$2 +grep_path=$3 + +if [[ $before_version == '' || $after_version == '' ]]; then + echo '[Usage] $0 {before_version} {after_version}' + exit +fi + +grep -rn ${before_version} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_version}/${after_version}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-master.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-master.yml new file mode 100644 index 0000000..4a17c4a --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-master.yml @@ -0,0 +1,30 @@ +--- +- name: 1. Create a cmoa namespace + kubernetes.core.k8s: + name: "{{ cmoa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: 2. Create secret + kubernetes.core.k8s: + state: present + namespace: "{{ item }}" + src: "{{ role_path }}/files/00-default/secret_nexus.yaml" + apply: yes + with_items: + - "{{ cmoa_namespace }}" + - default + +- name: 3. kubeconfig check + shell: "echo $KUBECONFIG" + register: kubeconfig + +- name: 4. Patch default sa + shell: "{{ role_path }}/files/00-default/sa_patch.sh {{ kubeconfig.stdout }}" + +- name: 5. Master IP Setting + command: "{{ role_path }}/files/ip_change {{ before_ip }} {{ ansible_default_ipv4.address }} {{ role_path }}/files" + +- name: 6. CloudMOA Version Change + command: "{{ role_path }}/files/rel_change {{ before_version }} {{ cmoa_version }} {{ role_path }}/files" diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-node.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-node.yml new file mode 100644 index 0000000..a568b74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/00-default-settings-node.yml @@ -0,0 +1,27 @@ +--- +- name: 1. Node add Label (worker1) + kubernetes.core.k8s: + apply: yes + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker1 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker1 + +- name: 2. 
Node add Label (worker2) + kubernetes.core.k8s: + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker2 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker2 \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/01-storage-install.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/01-storage-install.yml new file mode 100644 index 0000000..bef58ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/01-storage-install.yml @@ -0,0 +1,45 @@ +--- +- name: 1. yaml file install (sc, pv) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/01-storage/{{ item }}" + apply: yes + with_items: + - 00-storageclass.yaml + - 01-persistentvolume.yaml + +- name: 2. helmchart install (minio) + kubernetes.core.helm: + name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/01-storage/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/01-storage/{{item}}/values.yaml" + with_items: + - minio + +- name: 3. Change a Minio Api Service (NodePort=minio_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ minio_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ minio_service_port }}" + nodePort: "{{ minio_nodePort }}" + apply: yes + +- name: 4. Check Kubernetes Pods (minio) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 5. 
minio setting (minio) + command: "{{ role_path }}/files/01-storage/cmoa_minio {{ ansible_default_ipv4.address }}:{{ minio_nodePort }} {{ minio_user }} {{ bucket_name }} {{ days }} {{ rule_id }}" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/02-base-install.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/02-base-install.yml new file mode 100644 index 0000000..f7924a6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/02-base-install.yml @@ -0,0 +1,51 @@ +--- +- name: 1. kafka broker config apply (base) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 00-kafka-broker-config.yaml + +- name: 2. coredns config apply (base) + kubernetes.core.k8s: + state: present + namespace: default + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 01-coredns.yaml + +- name: 3. helmchart install (base) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/02-base/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/02-base/{{item}}/values.yaml" + with_items: + - base + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/03-ddl-dml.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/03-ddl-dml.yml new file mode 100644 index 0000000..9c44f8e --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/03-ddl-dml.yml @@ -0,0 +1,64 @@ +- name: 1. Check Postgres DB Data + command: "{{ role_path }}/files/postgres_check_data {{ cmoa_namespace }}" + register: pg_check_result + +- name: 2. Insert Elasticsearch template + command: "sh {{ role_path }}/files/03-ddl-dml/elasticsearch/es-ddl-put.sh {{ cmoa_namespace }}" +# when: pg_check_result.stdout != '1' +# register: es + +#- debug: +# msg: "{{es.stdout_lines}}" + +- name: 2.1. Elasticsearch dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy alertmanager base-cortex-configs base-cortex-distributor base-cortex-ruler" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + +- name: 2.2. Check Kubernetes Pods (Elasticsearch dependency) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 3. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" + register: pod_list + when: pg_check_result.stdout != '1' + +- name: 4. 
Copy psql file in postgres (DDL) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_ddl.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" + when: item is match('postgres') and pg_check_result.stdout != '1' + with_items: "{{ pod_list.stdout_lines }}" + ignore_errors: true + +- name: 5. Execute a command in postgres (DDL) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 6. Copy psql file in postgres (DML) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_dml.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 7. Execute a command in postgres (DML) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/04-keycloak-install.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/04-keycloak-install.yml new file mode 100644 index 0000000..de5fc9c --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/04-keycloak-install.yml @@ -0,0 +1,34 @@ +--- +- name: 1. 
helmchart install (keycloak) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/04-keycloak" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/04-keycloak/values.yaml" + with_items: + - keycloak + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + + +- name: 5. Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/05-imxc-install.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/05-imxc-install.yml new file mode 100644 index 0000000..420d2d1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/05-imxc-install.yml @@ -0,0 +1,16 @@ +--- +- name: 1. helmchart install (imxc) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/05-imxc" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/05-imxc/values.yaml" + with_items: + - imxc + +- name: 2. Check Kubernetes Pods (imxc / keycloak) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml new file mode 100644 index 0000000..7da82a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml @@ -0,0 +1,112 @@ +--- +- name: 1. 
helmchart install (imxc-ui-all) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + - imxc-ui-jspd + when: imxc_ui == 'all' + +- name: 1. helmchart install (imxc-ui-jaeger) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + when: imxc_ui == 'jaeger' + +- name: 2. Change a imxc-ui Service (imxc-ui-jaeger) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ jaeger_servicename }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ jaeger_service_port }}" + nodePort: "{{ jaeger_nodePort }}" + apply: yes + when: imxc_ui == 'jaeger' + +- name: 2. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" # Output is a column + register: pod_list + when: imxc_ui != 'all' + +- name: 3. Copy psql file in psql (imxc-jaeger) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jaeger_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 4. 
Execute a command in psql (imxc-jaeger) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 1. helmchart install (imxc-ui-jspd) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jspd + when: imxc_ui == 'jspd' + ignore_errors: true + +- name: 3. Copy psql file in postgres (imxc-ui-jspd) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jspd_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 4. Execute a command in postgres (imxc-ui-jspd) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 2. 
Check Kubernetes Pods (imxc ui) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml new file mode 100644 index 0000000..f800f87 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml @@ -0,0 +1,76 @@ +--- +- name: 0. Generate keycloak auth token + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/realms/master/protocol/openid-connect/token" + method: POST + body: "client_id={{ keycloak_auth_client }}&username={{ keycloak_admin_user }}&password={{ keycloak_admin_password }}&grant_type=password" + validate_certs: no + #no_log: "{{ keycloak_no_log | default('True') }}" + register: keycloak_auth_response + until: keycloak_auth_response.status == 200 + retries: 5 + delay: 2 + +- name: 1. Determine if realm exists + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/admin/realms/{{ keycloak_realm }}" + method: GET + status_code: + - 200 + - 404 + headers: + Accept: "application/json" + Authorization: "Bearer {{ keycloak_auth_response.json.access_token }}" + register: keycloak_realm_exists + +- name: 2. Validate Keycloak clients + ansible.builtin.assert: + that: + - item.name is defined and item.name | length > 0 + - (item.client_id is defined and item.client_id | length > 0) or (item.id is defined and item.id | length > 0) + fail_msg: "For each keycloak client, attributes `name` and either `id` or `client_id` is required" + quiet: True + loop: "{{ keycloak_clients | flatten }}" + loop_control: + label: "{{ item.name | default('unnamed client') }}" + +- name: 3. 
update a Keycloak client + community.general.keycloak_client: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + default_roles: "{{ item.roles | default(omit) }}" + client_id: "{{ item.client_id | default(omit) }}" + id: "{{ item.id | default(omit) }}" + name: "{{ item.name | default(omit) }}" + description: "{{ item.description | default(omit) }}" + root_url: "{{ item.root_url | default('') }}" + admin_url: "{{ item.admin_url | default('') }}" + base_url: "{{ item.base_url | default('') }}" + enabled: "{{ item.enabled | default(True) }}" + redirect_uris: "{{ item.redirect_uris | default(omit) }}" + web_origins: "{{ item.web_origins | default('+') }}" + bearer_only: "{{ item.bearer_only | default(omit) }}" + standard_flow_enabled: "{{ item.standard_flow_enabled | default(omit) }}" + implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(omit) }}" + direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(omit) }}" + service_accounts_enabled: "{{ item.service_accounts_enabled | default(omit) }}" + public_client: "{{ item.public_client | default(False) }}" + protocol: "{{ item.protocol | default(omit) }}" + state: present + #no_log: "{{ keycloak_no_log | default('True') }}" + register: create_client_result + loop: "{{ keycloak_clients | flatten }}" + when: (item.name is defined and item.client_id is defined) or (item.name is defined and item.id is defined) + +- name: 4. 
Dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy imxc-api noti-server auth-server zuul-deployment" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + + diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/08-finish.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/08-finish.yml new file mode 100644 index 0000000..f06cc24 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/08-finish.yml @@ -0,0 +1,92 @@ +--- +- name: 0. Check Kubernetes Pods (ALL) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 1. IP Setting reset + command: "{{ role_path }}/files/ip_change {{ansible_default_ipv4.address}} {{before_ip}} {{ role_path }}/files" + +- name: 2. CloudMOA Version reset + command: "{{ role_path }}/files/rel_change {{ cmoa_version }} {{ before_version }} {{ role_path }}/files" + +- debug: + msg: + - ======================================================================================= + - "## Keycloak WEB" + - keycloak URL = http://{{ ansible_default_ipv4.address }}:31082 + - --------------------------------------------------------------------------------------- + - "## Keycloak Login Theme Setting" + - "## WEB > Realm Settings > Themes > Login Theme" + - " > CloudMOA_V2" + - --------------------------------------------------------------------------------------- + - "## CloudMOA WEB " + - CloudMOA Jaeger = http://{{ ansible_default_ipv4.address }}:31080 + - CloudMOA JSPD = http://{{ ansible_default_ipv4.address }}:31084 + - ======================================================================================= + +#- name: Node add Label (worker1) +# shell: kubectl get node "{{ item }}" --show-labels +# register: worker1 +# with_items: +# - "{{ ansible_hostname }}" +# #when: ansible_hostname in groups.worker1 +# +#- name: Node add Label (worker2) +# shell: kubectl get node "{{ item }}" --show-labels +# register: worker2 +# with_items: +# - "{{ ansible_hostname }}" +# 
#when: ansible_hostname in groups.worker2 +# +# +#- name: debug +# debug: +# msg: "{{item}}" +# with_items: +# - "{{ worker1.stdout }}" +# - "{{ worker2.stdout }}" + +#- name: Iterate over pod names and delete the filtered ones +# #debug: +# # msg: "{{ item }}" +# kubernetes.core.k8s_cp: +# namespace: imxc +# pod: "{{ item }}" +# remote_path: /tmp/postgres_insert_ddl.psql +# local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') + +#- name: Execute a command +# kubernetes.core.k8s_exec: +# namespace: imxc +# pod: "{{ item }}" +# command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# +#- name: Iterate over pod names and delete the filtered ones +# #debug: +# # msg: "{{ item }}" +# kubernetes.core.k8s_cp: +# namespace: imxc +# pod: "{{ item }}" +# remote_path: /tmp/postgres_insert_dml.psql +# local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# +#- name: Execute a command +# kubernetes.core.k8s_exec: +# namespace: imxc +# pod: "{{ item }}" +# command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# register: test +# +#- name: test +# debug: +# msg: "{{ test.stdout }}" +##- set_fact: +## postgres_pod: "{{ postgres_pod2.stdout_lines is match('postgres') | default(postgres_pod2) }}" +# \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/helm-install.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_demo_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/01_old/roles/cmoa_demo_install/tasks/main.yml b/ansible/01_old/roles/cmoa_demo_install/tasks/main.yml new file mode 100644 index 0000000..7239fa3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- include: helm-install.yml + tags: helm-install + +- include: 00-default-settings-master.yml + tags: default_setting + when: kubernetes_role == 'master' + +- include: 00-default-settings-node.yml + tags: default_setting_node + when: kubernetes_role == 'node' + +- include: 01-storage-install.yml + tags: storage-install + when: kubernetes_role == 'master' + +- include: 02-base-install.yml + tags: base-install + when: kubernetes_role == 'master' + +- 
include: 03-ddl-dml.yml + tags: ddl-dml + when: kubernetes_role == 'master' + +- include: 04-keycloak-install.yml + tags: keycloak-install + when: kubernetes_role == 'master' + +- include: 05-imxc-install.yml + tags: imxc-install + when: kubernetes_role == 'master' + +- include: 06-imxc-ui-install.yml + tags: imxc-ui-install + when: kubernetes_role == 'master' + +- include: 07-keycloak-setting.yml + tags: keycloak-setting + when: kubernetes_role == 'master' + +- include: 08-finish.yml + tags: finish + when: kubernetes_role == 'master' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_demo_install/templates/realm.json.j2 b/ansible/01_old/roles/cmoa_demo_install/templates/realm.json.j2 new file mode 100644 index 0000000..1323ce2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/templates/realm.json.j2 @@ -0,0 +1,7 @@ +{ + "id": "{{ keycloak_realm }}", + "realm": "{{ keycloak_realm }}", + "enabled": true, + "eventsEnabled": true, + "eventsExpiration": 7200 +} diff --git a/ansible/01_old/roles/cmoa_demo_install/vars/main.yml b/ansible/01_old/roles/cmoa_demo_install/vars/main.yml new file mode 100644 index 0000000..14c8e95 --- /dev/null +++ b/ansible/01_old/roles/cmoa_demo_install/vars/main.yml @@ -0,0 +1,7 @@ +--- +# name of the realm to create, this is a required variable +keycloak_realm: Exem + +# other settings +keycloak_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_http_port }}" +keycloak_management_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_management_http_port }}" diff --git a/ansible/01_old/roles/cmoa_install/defaults/main.yml b/ansible/01_old/roles/cmoa_install/defaults/main.yml new file mode 100644 index 0000000..7c45df5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/defaults/main.yml @@ -0,0 +1,65 @@ +# helm file install +helm_checksum: sha256:950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 +helm_version: v3.10.3 + +# cmoa info +cmoa_namespace: imxc +cmoa_version: rel3.4.8 + +# 
default ip/version (not change) +before_ip: 111.111.111.111 +before_version: rel0.0.0 + +# files/00-default in role +docker_secret_file: secret_nexus.yaml + +# all, jaeger, jspd +imxc_ui: all + +# [docker_config_path] +docker_config_nexus: dockerconfig/docker_config_nexus.json + +# [jaeger] +jaeger_servicename: imxc-ui-service-jaeger +jaeger_service_port: 80 +jaeger_nodePort: 31080 # only imxc-ui-jaeger option (imxc-ui-jaeger template default port=31084) + +# [minio] +minio_service_name: minio +minio_service_port: 9000 +minio_nodePort: 32002 +minio_user: cloudmoa +minio_pass: admin1234 +bucket_name: cortex-bucket +days: 42 +rule_id: cloudmoa + +# [Elasticsearch] +elasticsearch_service_name: elasticsearch +elasticsearch_service_port: 9200 +elasticsearch_nodePort: 30200 + +# [Keycloak] +# Keycloak configuration settings +keycloak_http_port: 31082 +keycloak_https_port: 8443 +keycloak_management_http_port: 31990 +keycloak_realm: exem + +# Keycloak administration console user +keycloak_admin_user: admin +keycloak_admin_password: admin +keycloak_auth_realm: master +keycloak_auth_client: admin-cli +keycloak_context: /auth +keycloak_login_theme: CloudMOA_V2 + +# keycloak_clients +keycloak_clients: + - name: 'authorization_server' + client_id: authorization_server + realm: exem + redirect_uris: "http://{{ ansible_default_ipv4.address }}:31080/*,http://{{ ansible_default_ipv4.address }}:31084/*,http://localhost:8080/*,http://localhost:8081/*" + public_client: True + + diff --git a/ansible/01_old/roles/cmoa_install/files/00-default/sa_patch.sh b/ansible/01_old/roles/cmoa_install/files/00-default/sa_patch.sh new file mode 100755 index 0000000..618a35b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/00-default/sa_patch.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export KUBECONFIG=$1 + +kubectl wait node --for=condition=ready --all --timeout=60s + +#kubectl -n imxc patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' +kubectl -n default patch sa default -p 
'{"imagePullSecrets": [{"name": "regcred"}]}' diff --git a/ansible/01_old/roles/cmoa_install/files/00-default/secret_dockerhub.yaml b/ansible/01_old/roles/cmoa_install/files/00-default/secret_dockerhub.yaml new file mode 100644 index 0000000..268027b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/00-default/secret_dockerhub.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: regcred +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CiAgICAgICJhdXRoIjogIlpYaGxiV1JsZGpJNk0yWXlObVV6T0RjdFlqY3paQzAwTkRVMUxUazNaRFV0T1dWaU9EWmtObVl4WXpOayIKICAgIH0KICB9Cn0KCg== +type: kubernetes.io/dockerconfigjson diff --git a/ansible/01_old/roles/cmoa_install/files/00-default/secret_nexus.yaml b/ansible/01_old/roles/cmoa_install/files/00-default/secret_nexus.yaml new file mode 100644 index 0000000..6a2543f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/00-default/secret_nexus.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICIxMC4xMC4zMS4yNDM6NTAwMCI6IHsKICAgICAgImF1dGgiOiAiWTI5eVpUcGpiM0psWVdSdGFXNHhNak0wIgogICAgfQogIH0KfQoK +kind: Secret +metadata: + name: regcred +type: kubernetes.io/dockerconfigjson + diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/00-storageclass.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/00-storageclass.yaml new file mode 100644 index 0000000..8f41292 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/00-storageclass.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exem-local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml new file mode 100644 index 0000000..1bd4546 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-0 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-1 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv2 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-2 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv3 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-3 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv4 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/cmoa_minio b/ansible/01_old/roles/cmoa_install/files/01-storage/cmoa_minio new file mode 100755 index 0000000..522b87d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/cmoa_minio @@ -0,0 
+1,63 @@ +#! /usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, time, urllib3 +from minio import Minio +from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition +from minio.commonconfig import ENABLED, Filter + +def minio_conn(ipaddr, portnum, ac_key, sec_key): + conn='{}:{}'.format(ipaddr,portnum) + url='http://{}'.format(conn) + print(url) + minio_client = Minio( + conn, access_key=ac_key, secret_key=sec_key, secure=False, + http_client=urllib3.ProxyManager( + url, timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + retries=urllib3.Retry( + total=5, backoff_factor=0.2, + status_forcelist=[ + 500, 502, 503, 504 + ], + ), + ), + ) + + return minio_client + +def minio_create_buckets(minio_client, bucket_name, days, rule_id="cloudmoa"): + config = LifecycleConfig( + [ + Rule( + ENABLED, + rule_filter=Filter(prefix=""), + rule_id=rule_id, + expiration=Expiration(days=days), + ), + ], + ) + minio_client.set_bucket_lifecycle(bucket_name, config) + +def minio_delete_bucket(client, bucket_name): + client.delete_bucket_lifecycle(bucket_name) + +def main(): + s3_url = os.sys.argv[1].split(':')[0] + s3_url_port = os.sys.argv[1].split(':')[1] + minio_user = os.sys.argv[2] + minio_pass = os.sys.argv[3] + bucket_name = os.sys.argv[4] + minio_days = os.sys.argv[5] + rule_id = os.sys.argv[6] + + print(s3_url, s3_url_port, minio_user, minio_pass) + + minio_client=minio_conn(s3_url, s3_url_port, minio_user, minio_pass) + minio_create_buckets(minio_client, bucket_name, minio_days, rule_id) + +if __name__ == "__main__": + try: + main() + except Exception as err: + print("[Usage] minio {url:port} {username} {password} {bucketName} {days} {ruleId}") + print(err) \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/.helmignore b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/.helmignore new file mode 100644 index 0000000..a9fe727 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/Chart.yaml new file mode 100644 index 0000000..fc21076 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +description: Multi-Cloud Object Storage +name: minio +version: 4.0.2 +appVersion: RELEASE.2022-05-08T23-50-31Z +keywords: + - minio + - storage + - object-storage + - s3 + - cluster +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +sources: +- https://github.com/minio/minio +maintainers: +- name: MinIO, Inc + email: dev@minio.io diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/README.md b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/README.md new file mode 100644 index 0000000..ad3eb7d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/README.md @@ -0,0 +1,235 @@ +# MinIO Helm Chart + +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) + +MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. 
+ +For more detailed documentation please visit [here](https://docs.minio.io/) + +## Introduction + +This chart bootstraps MinIO Cluster on [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Helm cli with Kubernetes cluster configured. +- PV provisioner support in the underlying infrastructure. (We recommend using ) +- Use Kubernetes version v1.19 and later for best experience. + +## Configure MinIO Helm repo + +```bash +helm repo add minio https://charts.min.io/ +``` + +### Installing the Chart + +Install this chart using: + +```bash +helm install --namespace minio --set rootUser=rootuser,rootPassword=rootpass123 --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Upgrading the Chart + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +### Configuration + +Refer the [Values file](./values.yaml) for all the possible config fields. + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. 
For example, + +```bash +helm install --name my-release -f values.yaml minio/minio +``` + +### Persistence + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +### Existing PersistentVolumeClaim + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +### NetworkPolicy + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for *all* pods in the namespace: + +``` +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. 
+ +### Existing secret + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. + +First, create the secret: + +```bash +kubectl create secret generic my-minio-secret --from-literal=rootUser=foobarbaz --from-literal=rootPassword=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: + +```bash +helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data.\ in Secret | Corresponding variable | Description | Required | +|:------------------------|:-----------------------|:---------------|:---------| +| `rootUser` | `rootUser` | Root user. | yes | +| `rootPassword` | `rootPassword` | Root password. | yes | + +All corresponding variables will be ignored in values file. + +### Configure TLS + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). Then create a secret using + +```bash +kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. 
If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include MinIO's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for MinIO's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +### Create buckets after install + +Install the chart, specifying the buckets you want to create after install: + +```bash +helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + +33# Create policies after install +Install the chart, specifying the policies you want to create after install: + +```bash +helm install --set policies[0].name=mypolicy,policies[0].statements[0].resources[0]='arn:aws:s3:::bucket1',policies[0].statements[0].actions[0]='s3:ListBucket',policies[0].statements[0].actions[1]='s3:GetObject' minio/minio +``` + +Description of the configuration parameters used above - + +- `policies[].name` - name of the policy to create, must be a string with length > 0 +- `policies[].statements[]` - list of statements, includes actions and resources +- 
`policies[].statements[].resources[]` - list of resources that applies the statement +- `policies[].statements[].actions[]` - list of actions granted + +### Create user after install + +Install the chart, specifying the users you want to create after install: + +```bash +helm install --set users[0].accessKey=accessKey,users[0].secretKey=secretKey,users[0].policy=none,users[1].accessKey=accessKey2,users[1].secretRef=existingSecret,users[1].secretKey=password,users[1].policy=none minio/minio +``` + +Description of the configuration parameters used above - + +- `users[].accessKey` - accessKey of user +- `users[].secretKey` - secretKey of usersecretRef +- `users[].existingSecret` - secret name that contains the secretKey of user +- `users[].existingSecretKey` - data key in existingSecret secret containing the secretKey +- `users[].policy` - name of the policy to assign to user + +## Uninstalling the Chart + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +helm delete my-release +``` + +or + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt new file mode 100644 index 0000000..9337196 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt @@ -0,0 +1,43 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access MinIO from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. 
kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . }}-local + +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +MinIO can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . 
}} + +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..35a48fc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,109 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? 
; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.buckets }} +{{ $global := . 
}} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ tpl .name $global }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt new file mode 100644 index 0000000..d565b16 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkPolicyExists ($policy) +# Check if the policy exists, by using the exit code of `mc admin policy info` +checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? 
+} + +# createPolicy($name, $filename) +createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.policies }} +# Create the policies +{{- range $idx, $policy := .Values.policies }} +createPolicy {{ $policy.name }} policy_{{ $idx }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt new file mode 100644 index 0000000..7771428 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt @@ -0,0 +1,88 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? 
; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkUserExists ($username) +# Check if the user exists, by using the exit code of `mc admin user info` +checkUserExists() { + USER=$1 + CMD=$(${MC} admin user info myminio $USER > /dev/null 2>&1) + return $? +} + +# createUser ($username, $password, $policy) +createUser() { + USER=$1 + PASS=$2 + POLICY=$3 + + # Create the user if it does not exist + if ! checkUserExists $USER ; then + echo "Creating user '$USER'" + ${MC} admin user add myminio $USER $PASS + else + echo "User '$USER' already exists." + fi + + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.users }} +{{ $global := . }} +# Create the users +{{- range .Values.users }} +{{- if .existingSecret }} +createUser {{ tpl .accessKey $global }} $(cat /config/secrets/{{ tpl .accessKey $global }}) {{ .policy }} +{{ else }} +createUser {{ tpl .accessKey $global }} {{ .secretKey }} {{ .policy }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt new file mode 100644 index 0000000..b583a77 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt @@ -0,0 +1,58 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. 
+ +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# runCommand ($@) +# Run custom mc command +runCommand() { + ${MC} "$@" + return $? 
+} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.customCommands }} +# Run custom commands +{{- range .Values.customCommands }} +runCommand {{ .command }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl new file mode 100644 index 0000000..83a2e15 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl @@ -0,0 +1,18 @@ +{{- $statements_length := len .statements -}} +{{- $statements_length := sub $statements_length 1 -}} +{ + "Version": "2012-10-17", + "Statement": [ +{{- range $i, $statement := .statements }} + { + "Effect": "Allow", + "Action": [ +"{{ $statement.actions | join "\",\n\"" }}" + ]{{ if $statement.resources }}, + "Resource": [ +"{{ $statement.resources | join "\",\n\"" }}" + ]{{ end }} + }{{ if lt $i $statements_length }},{{end }} +{{- end }} + ] +} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl new file mode 100644 index 0000000..4e38194 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl @@ -0,0 +1,218 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare ">=1.7-0, <1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for console ingress. +*/}} +{{- define "minio.consoleIngress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to MinIO binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "minio.getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "minio.root.username" -}} + {{- if .Values.rootUser }} + {{- .Values.rootUser | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 20 "Key" "rootUser") }} + {{- end }} +{{- end -}} + +{{- define "minio.root.password" -}} + {{- if .Values.rootPassword }} + {{- .Values.rootPassword | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 40 "Key" "rootPassword") }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml new file mode 100644 index 0000000..95a7c60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + add-user: |- +{{ include (print $.Template.BasePath "/_helper_create_user.txt") . | indent 4 }} + add-policy: |- +{{ include (print $.Template.BasePath "/_helper_create_policy.txt") . 
| indent 4 }} +{{- range $idx, $policy := .Values.policies }} + # {{ $policy.name }} + policy_{{ $idx }}.json: |- +{{ include (print $.Template.BasePath "/_helper_policy.tpl") . | indent 4 }} +{{ end }} + custom-command: |- +{{ include (print $.Template.BasePath "/_helper_custom_command.txt") . | indent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml new file mode 100644 index 0000000..2ce9a93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.consoleIngress.enabled -}} +{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}} +{{- $servicePort := .Values.consoleService.port -}} +{{- $ingressPath := .Values.consoleIngress.path -}} +apiVersion: {{ template "minio.consoleIngress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.consoleIngress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.consoleIngress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.consoleIngress.ingressClassName }} + ingressClassName: {{ .Values.consoleIngress.ingressClassName }} +{{- end }} +{{- if .Values.consoleIngress.tls }} + tls: + {{- range .Values.consoleIngress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.consoleIngress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml new file mode 100644 index 0000000..f4b1294 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml @@ -0,0 +1,48 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-console + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.consoleService.annotations }} + annotations: +{{ toYaml .Values.consoleService.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} + type: ClusterIP + {{- if not (empty .Values.consoleService.clusterIP) }} + clusterIP: {{ .Values.consoleService.clusterIP }} + {{end}} +{{- else if eq .Values.consoleService.type "LoadBalancer" }} + type: {{ .Values.consoleService.type }} + loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} +{{- else }} + type: {{ .Values.consoleService.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.consoleService.port }} + protocol: TCP +{{- if (and (eq .Values.consoleService.type "NodePort") ( .Values.consoleService.nodePort)) }} + nodePort: {{ .Values.consoleService.nodePort }} +{{- else }} + targetPort: {{ .Values.consoleService.port }} +{{- end}} +{{- if .Values.consoleService.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.consoleService.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml new file mode 100644 index 0000000..a06bc35 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: 1 + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }}" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml new file mode 100644 index 0000000..b14f86b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml @@ -0,0 +1,173 @@ +{{- if eq .Values.mode "gateway" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + {{- if eq .Values.gateway.type "nas" }} + - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }} " + {{- end }} + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client.crt" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client.key" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml new file mode 100644 index 0000000..8d9a837 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..68a2599 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + - port: {{ .Values.consoleService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..8037eb7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: minio + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" . 
}} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..434b31d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeBucketJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml new file mode 100644 index 0000000..ae78769 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.policies }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-policies-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-policies-job + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makePolicyJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.podAnnotations }} + annotations: +{{ toYaml .Values.makePolicyJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makePolicyJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makePolicyJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-policy"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makePolicyJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml new file mode 100644 index 0000000..d3750e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml @@ -0,0 +1,97 @@ +{{- $global := . -}} +{{- if .Values.users }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-user-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-user-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeUserJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeUserJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeUserJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeUserJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- range .Values.users }} + {{- if .existingSecret }} + - secret: + name: {{ tpl .existingSecret $global }} + items: + - key: {{ .existingSecretKey }} + path: secrets/{{ tpl .accessKey $global }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeUserJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml new file mode 100644 index 0000000..7e83faf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml @@ -0,0 +1,87 @@ +{{- if .Values.customCommands }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-custom-command-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-custom-command-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.customCommandJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.podAnnotations }} + annotations: +{{ toYaml .Values.customCommandJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.customCommandJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.customCommandJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/custom-command"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.customCommandJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml new file mode 100644 index 0000000..369aade --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml @@ -0,0 +1,35 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml new file mode 100644 index 0000000..da2ecab --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + rootUser: {{ include "minio.root.username" . | b64enc | quote }} + rootPassword: {{ include "minio.root.password" . 
| b64enc | quote }} + {{- if .Values.etcd.clientCert }} + etcd_client.crt: {{ .Values.etcd.clientCert | toString | b64enc | quote }} + {{- end }} + {{- if .Values.etcd.clientCertKey }} + etcd_client.key: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 0000000..4bac7e3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/service.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/service.yaml new file mode 100644 index 0000000..64aa990 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/service.yaml @@ -0,0 +1,49 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + monitoring: "true" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..6a4bd94 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..809848f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{ else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- if .Values.tls.enabled }} + - port: https + scheme: https + {{ else }} + - port: http + scheme: http + {{- end }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + {{- if not .Values.metrics.serviceMonitor.public }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-prometheus + key: token + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} + monitoring: "true" +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4160f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml @@ -0,0 +1,217 @@ +{{- if eq .Values.mode "distributed" }} +{{ $poolCount := .Values.pools | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $poolCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/01-storage/minio/values.yaml b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/values.yaml new file mode 100644 index 0000000..a957f7f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/01-storage/minio/values.yaml @@ -0,0 +1,461 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: 10.10.31.243:5000/cmoa3/minio + tag: RELEASE.2022-05-08T23-50-31Z + pullPolicy: IfNotPresent + +imagePullSecrets: + - name: "regcred" +# - name: "image-pull-secret" + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). 
+## +mcImage: + repository: 10.10.31.243:5000/cmoa3/mc + tag: RELEASE.2022-05-09T04-08-26Z + pullPolicy: IfNotPresent + +## minio mode, i.e. standalone or distributed or gateway. +mode: distributed ## other supported values are "standalone", "gateway" + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Typically the deployment/statefulset includes checksums of secrets/config, +## So that when these change on a subsequent helm install, the deployment/statefulset +## is restarted. This can result in unnecessary restarts under GitOps tooling such as +## flux, so set to "true" to disable this behaviour. +ignoreChartChecksums: false + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Port number for MinIO S3 API Access +minioAPIPort: "9000" + +## Port number for MinIO Browser COnsole Access +minioConsolePort: "9001" + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default rootUser, rootPassword +## AccessKey and secretKey is generated when not set +## Distributed MinIO ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +rootUser: "admin" +rootPassword: "passW0rd" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | rootUser | rootUser | +## | rootPassword | rootPassword | +## +## All mentioned variables will be ignored in values file. +## .data.rootUser and .data.rootPassword are mandatory, +## others depend on enabled status of corresponding sections. 
+existingSecret: "" + +## Directory on the MinIO pof +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" + +## Path where PV would be mounted on the MinIO Pod +mountPath: "/export" +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +## +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 2 +# Number of MinIO containers running +#replicas: 16 +replicas: 2 +# Number of expanded MinIO clusters +pools: 1 + +# Deploy if 'mode == gateway' - 4 replicas. +gateway: + type: "nas" # currently only "nas" are supported. + replicas: 4 + +## TLS Settings for MinIO +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for MinIO. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. 
+trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: {} + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "exem-local-storage" + VolumeName: "" + accessMode: ReadWriteOnce + size: 50Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +#service: +# type: NodePort +# clusterIP: ~ + ## Make sure to match it to minioAPIPort +# port: "9000" +# nodePort: "32002" + +service: + type: ClusterIP + clusterIP: ~ + ## Make sure to match it to minioAPIPort + port: "9000" + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +ingress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +consoleService: + type: NodePort + clusterIP: ~ + ## Make sure to match it to minioConsolePort + port: "9001" + nodePort: "32001" + +consoleIngress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - console.minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if 
enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" + +# Additional pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + #memory: 16Gi + memory: 1Gi + cpu: 200m + +## List of policies to be created after minio install +## +## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] +## you can define additional policies with custom supported actions and resources +policies: [] +## writeexamplepolicy policy grants creation or deletion of buckets with name +## starting with example. In addition, grants objects write permissions on buckets starting with +## example. +# - name: writeexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:AbortMultipartUpload" +# - "s3:GetObject" +# - "s3:DeleteObject" +# - "s3:PutObject" +# - "s3:ListMultipartUploadParts" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:CreateBucket" +# - "s3:DeleteBucket" +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## readonlyexamplepolicy policy grants access to buckets with name starting with example. +## In addition, grants objects read permissions on buckets starting with example. 
+# - name: readonlyexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:GetObject" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## Additional Annotations for the Kubernetes Job makePolicyJob +makePolicyJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of users to be created after minio install +## +users: + ## Username, password and policy to be assigned to the user + ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] + ## Add new policies as explained here https://docs.min.io/docs/minio-multi-user-quickstart-guide.html + ## NOTE: this will fail if LDAP is enabled in your MinIO deployment + ## make sure to disable this if you are using LDAP. 
+ - accessKey: cloudmoa + secretKey: admin1234 + policy: consoleAdmin + # Or you can refer to specific secret + #- accessKey: externalSecret + # existingSecret: my-secret + # existingSecretKey: password + # policy: readonly + + +## Additional Annotations for the Kubernetes Job makeUserJob +makeUserJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of buckets to be created after minio install +## +buckets: + - name: cortex-bucket + policy: none + purge: false + versioning: false + + # # Name of the bucket + # - name: bucket1 + # # Policy to be set on the + # # bucket [none|download|upload|public] + # policy: none + # # Purge if bucket exists already + # purge: false + # # set versioning for + # # bucket [true|false] + # versioning: false + # - name: bucket2 + # policy: none + # purge: false + # versioning: true + +## Additional Annotations for the Kubernetes Job makeBucketJob +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of command to run after minio install +## NOTE: the mc command TARGET is always "myminio" +customCommands: + # - command: "admin policy set myminio consoleAdmin group='cn=ops,cn=groups,dc=example,dc=com'" + +## Additional Annotations for the Kubernetes Job customCommandJob +customCommandJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) +## when Chart is deployed +environment: + ## Please refer for comprehensive list https://docs.min.io/minio/baremetal/reference/minio-server/minio-server.html + ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" + ## MINIO_BROWSER: "off" + +## The name of a secret in the same kubernetes namespace which contain secret values +## This can be useful for LDAP password, etc +## The key in the secret must be 'config.env' +## +# extraSecret: minio-extraenv + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. + name: "minio-sa" + +metrics: + serviceMonitor: + enabled: false + public: true + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? -ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. 
cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# 
Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + 
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the 
two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/01-coredns.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/01-coredns.yaml new file mode 100644 index 0000000..c1cb74b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/01-coredns.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-dns + kubernetes.io/name: coredns + name: coredns + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + 
protocol: TCP + targetPort: 53 + - name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: ClusterIP + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/Chart.yaml new file mode 100644 index 0000000..74d1d30 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: base +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml new file mode 100644 index 0000000..74b9505 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: analysis +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml new file mode 100644 index 0000000..21a9298 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml @@ -0,0 +1,87 @@ +#docker run -d --hostname my-rabbit --name some-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management + +--- +kind: Service +apiVersion: v1 +metadata: + name: metric-analyzer-master + namespace: imxc +spec: +# clusterIP: None # We need a headless service to allow the pods to discover each + ports: # other during autodiscover phase for cluster creation. + - name: http # A ClusterIP will prevent resolving dns requests for other pods + protocol: TCP # under the same service. 
+ port: 15672 + targetPort: 15672 +# nodePort: 30001 + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 +# nodePort: 30002 + selector: + app: metric-analyzer-master +# type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-master + name: metric-analyzer-master + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: metric-analyzer-master + template: + metadata: + labels: + app: metric-analyzer-master + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer:{{ .Values.global.METRIC_ANALYZER_MASTER_VERSION }} + imagePullPolicy: IfNotPresent + name: master +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: POSTGRES_SERVER + value: postgres + - name: POSTGRES_USER + value: admin + - name: POSTGRES_PW + value: eorbahrhkswp + - name: POSTGRES_DB + value: postgresdb + - name: PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: POSTGRES_PORT + value: "5432" + - name: ES_SERVER + value: elasticsearch + - name: ES_PORT + value: "9200" + - name: ES_ID + value: "elastic" + - name: ES_PWD + value: "elastic" + - name: LOG_LEVEL + value: INFO + - name: AI_TYPE + value: BASELINE + - name: BASELINE_SIZE + value: "3" + - name: CHECK_DAY + value: "2" + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml new file mode 100644 index 0000000..7e6eaea --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + 
labels: + app: metric-analyzer-worker + name: metric-analyzer-worker + namespace: imxc +spec: + replicas: 10 + selector: + matchLabels: + app: metric-analyzer-worker + template: + metadata: + labels: + app: metric-analyzer-worker + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer_worker:{{ .Values.global.METRIC_ANALYZER_WORKER_VERSION }} + imagePullPolicy: IfNotPresent + name: worker +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" +# volumes: +# - hostPath: +# path: /usr/share/zoneinfo/Asia/Seoul +# name: timezone-config + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml new file mode 100644 index 0000000..d764210 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml @@ -0,0 +1,68 @@ +# Default values for analysis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore new file mode 100644 index 0000000..db3418b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore @@ -0,0 +1,29 @@ +# Git +.git/ +.gitignore +.github/ + +# IDE +.project +.idea/ +*.tmproj + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Cortex ignore +docs/ +tools/ +ct.yaml +ci/ +README.md.gotmpl +.prettierignore +CHANGELOG.md +MAINTAINERS.md +LICENSE +Makefile +renovate.json + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock new file mode 100644 index 0000000..f909218 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock @@ -0,0 +1,24 @@ +dependencies: +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +digest: sha256:a6b7c1239f9cabc85dd647798a6f92ae8a9486756ab1e87fc11af2180ab03ee4 +generated: "2021-12-25T19:21:57.666697218Z" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml new file mode 100644 index 0000000..9122fe6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml @@ -0,0 +1,56 @@ +apiVersion: v2 +appVersion: v1.11.0 +dependencies: +- alias: memcached + condition: memcached.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-read + condition: memcached-index-read.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-write + condition: memcached-index-write.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-frontend + condition: memcached-frontend.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-blocks-index + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- 
alias: memcached-blocks + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks-metadata + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +description: Horizontally scalable, highly available, multi-tenant, long term Prometheus. +home: https://cortexmetrics.io/ +icon: https://avatars2.githubusercontent.com/u/43045022?s=200&v=4 +kubeVersion: ^1.19.0-0 +maintainers: +- email: thayward@infoblox.com + name: Tom Hayward + url: https://github.com/kd7lxl +- email: Niclas.Schad@plusserver.com + name: Niclas Schad + url: https://github.com/ShuzZzle +name: cortex +sources: +- https://github.com/cortexproject/cortex-helm-chart +version: 1.2.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/README.md b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/README.md new file mode 100644 index 0000000..9a793d3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/README.md @@ -0,0 +1,754 @@ + + +# cortex + +![Version: 1.2.0](https://img.shields.io/badge/Version-1.2.0-informational?style=flat-square) ![AppVersion: v1.11.0](https://img.shields.io/badge/AppVersion-v1.11.0-informational?style=flat-square) + +Horizontally scalable, highly available, multi-tenant, long term Prometheus. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Tom Hayward | thayward@infoblox.com | https://github.com/kd7lxl | +| Niclas Schad | Niclas.Schad@plusserver.com | https://github.com/ShuzZzle | + +## Documentation + +Checkout our documentation for the cortex-helm-chart [here](https://cortexproject.github.io/cortex-helm-chart/) + +## Dependencies + +### Key-Value store + +Cortex requires a Key-Value (KV) store to store the ring. 
It can use traditional KV stores like [Consul](https://www.consul.io/) or [etcd](https://etcd.io/), but it can also build its own KV store on top of memberlist library using a gossip algorithm. + +The recommended approach is to use the built-in memberlist as a KV store, where supported. + +External KV stores can be installed alongside Cortex using their respective helm charts https://github.com/bitnami/charts/tree/master/bitnami/etcd and https://github.com/helm/charts/tree/master/stable/consul. + +### Storage + +Cortex requires a storage backend to store metrics and indexes. +See [cortex documentation](https://cortexmetrics.io/docs/) for details on storage types and documentation + +## Installation + +[Helm](https://helm.sh) must be installed to use the charts. +Please refer to Helm's [documentation](https://helm.sh/docs/) to get started. + +Once Helm is set up properly, add the repo as follows: + +```bash + helm repo add cortex-helm https://cortexproject.github.io/cortex-helm-chart +``` + +Cortex can now be installed with the following command: + +```bash + helm install cortex --namespace cortex cortex-helm/cortex +``` + +If you have custom options or values you want to override: + +```bash + helm install cortex --namespace cortex -f my-cortex-values.yaml cortex-helm/cortex +``` + +Specific versions of the chart can be installed using the `--version` option, with the default being the latest release. +What versions are available for installation can be listed with the following command: + +```bash + helm search repo cortex-helm +``` + +As part of this chart many different pods and services are installed which all +have varying resource requirements. Please make sure that you have sufficient +resources (CPU/memory) available in your cluster before installing Cortex Helm +chart. 
+ +## Upgrades + +To upgrade Cortex use the following command: + +```bash + helm upgrade cortex -f my-cortex-values.yaml cortex-helm/cortex +``` +Note that it might be necessary to use `--reset-values` since some default values in the values.yaml might have changed or were removed. + +Source code can be found [here](https://cortexmetrics.io/) + +## Requirements + +Kubernetes: `^1.19.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | memcached(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-read(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-write(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-frontend(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-index(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-metadata(memcached) | 5.15.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| alertmanager.​affinity | object | `{}` | | +| alertmanager.​annotations | object | `{}` | | +| alertmanager.​containerSecurityContext.​enabled | bool | `true` | | +| alertmanager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| alertmanager.​enabled | bool | `true` | | +| alertmanager.​env | list | `[]` | Extra env variables to pass to the cortex container | +| alertmanager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log level (debug, info, warn, error) | +| alertmanager.​extraContainers | list | `[]` | Additional containers to be added to the cortex pod. | +| alertmanager.​extraPorts | list | `[]` | Additional ports to the cortex services. Useful to expose extra container ports. 
| +| alertmanager.​extraVolumeMounts | list | `[]` | Extra volume mounts that will be added to the cortex container | +| alertmanager.​extraVolumes | list | `[]` | Additional volumes to the cortex pod. | +| alertmanager.​initContainers | list | `[]` | Init containers to be added to the cortex pod. | +| alertmanager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​nodeSelector | object | `{}` | | +| alertmanager.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Alertmanager data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| alertmanager.​persistentVolume.​annotations | object | `{}` | Alertmanager data Persistent Volume Claim annotations | +| alertmanager.​persistentVolume.​enabled | bool | `true` | If true and alertmanager.statefulSet.enabled is true, Alertmanager will create/use a Persistent Volume Claim If false, use emptyDir | +| alertmanager.​persistentVolume.​size | string | `"2Gi"` | Alertmanager data Persistent Volume size | +| alertmanager.​persistentVolume.​storageClass | string | `nil` | Alertmanager data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| alertmanager.​persistentVolume.​subPath | string | `""` | Subdirectory of Alertmanager data Persistent Volume to mount Useful if the volume's root directory is not empty | +| alertmanager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| alertmanager.​podDisruptionBudget | object | `{"maxUnavailable":1}` | If not set then a PodDisruptionBudget will not be created | +| alertmanager.​podLabels | object | `{}` | Pod Labels | +| alertmanager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​replicas | int | `1` | | +| alertmanager.​resources | object | `{}` | | +| alertmanager.​securityContext | object | `{}` | | +| alertmanager.​service.​annotations | object | `{}` | | +| alertmanager.​service.​labels | object | `{}` | | +| alertmanager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| alertmanager.​serviceMonitor.​additionalLabels | object | `{}` | | +| alertmanager.​serviceMonitor.​enabled | bool | `false` | | +| alertmanager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| alertmanager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| alertmanager.​serviceMonitor.​relabelings | list | `[]` | | +| alertmanager.​sidecar | object | `{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/data","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_alertmanager","labelValue":null,"resources":{},"searchNamespace":null,"skipTlsVerify":false,"watchMethod":null}` | Sidecars 
that collect the configmaps with specified label and stores the included files them into the respective folders | +| alertmanager.​sidecar.​skipTlsVerify | bool | `false` | skipTlsVerify Set to true to skip tls verification for kube api calls | +| alertmanager.​startupProbe.​failureThreshold | int | `10` | | +| alertmanager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. This is useful for using a persistent volume for storing silences between restarts. | +| alertmanager.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| alertmanager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| alertmanager.​strategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​terminationGracePeriodSeconds | int | `60` | | +| alertmanager.​tolerations | list | `[]` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| clusterDomain | string | `"cluster.local"` | Kubernetes cluster DNS domain | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"compactor"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| 
compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| compactor.​annotations | object | `{}` | | +| compactor.​containerSecurityContext.​enabled | bool | `true` | | +| compactor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| compactor.​enabled | bool | `true` | | +| compactor.​env | list | `[]` | | +| compactor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| compactor.​extraContainers | list | `[]` | | +| compactor.​extraPorts | list | `[]` | | +| compactor.​extraVolumeMounts | list | `[]` | | +| compactor.​extraVolumes | list | `[]` | | +| compactor.​initContainers | list | `[]` | | +| compactor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​nodeSelector | object | `{}` | | +| compactor.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | compactor data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| compactor.​persistentVolume.​annotations | object | `{}` | compactor data Persistent Volume Claim annotations | +| compactor.​persistentVolume.​enabled | bool | `true` | If true compactor will create/use a Persistent Volume Claim If false, use emptyDir | +| compactor.​persistentVolume.​size | string | `"2Gi"` | | +| compactor.​persistentVolume.​storageClass | string | `nil` | compactor data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| compactor.​persistentVolume.​subPath | string | `""` | Subdirectory of compactor data Persistent Volume to mount Useful if the volume's root directory is not empty | +| compactor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| compactor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| compactor.​podLabels | object | `{}` | Pod Labels | +| compactor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​replicas | int | `1` | | +| compactor.​resources | object | `{}` | | +| compactor.​securityContext | object | `{}` | | +| compactor.​service.​annotations | object | `{}` | | +| compactor.​service.​labels | object | `{}` | | +| compactor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| compactor.​serviceMonitor.​additionalLabels | object | `{}` | | +| compactor.​serviceMonitor.​enabled | bool | `false` | | +| compactor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| compactor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| compactor.​serviceMonitor.​relabelings | list | `[]` | | +| compactor.​startupProbe.​failureThreshold | int | `60` | | +| compactor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​startupProbe.​initialDelaySeconds | int | `120` | | +| compactor.​startupProbe.​periodSeconds | int | `30` | | +| compactor.​strategy.​type | string | `"RollingUpdate"` | | +| compactor.​terminationGracePeriodSeconds | int | `240` | | +| compactor.​tolerations | list | `[]` | | +| 
config.​alertmanager.​enable_api | bool | `false` | Enable the experimental alertmanager config api. | +| config.​alertmanager.​external_url | string | `"/api/prom/alertmanager"` | | +| config.​alertmanager.​storage | object | `{}` | Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config | +| config.​api.​prometheus_http_prefix | string | `"/prometheus"` | | +| config.​api.​response_compression_enabled | bool | `true` | Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression. | +| config.​auth_enabled | bool | `false` | | +| config.​blocks_storage.​bucket_store.​bucket_index.​enabled | bool | `true` | | +| config.​blocks_storage.​bucket_store.​sync_dir | string | `"/data/tsdb-sync"` | | +| config.​blocks_storage.​tsdb.​dir | string | `"/data/tsdb"` | | +| config.​distributor.​pool.​health_check_ingesters | bool | `true` | | +| config.​distributor.​shard_by_all_labels | bool | `true` | Distribute samples based on all labels, as opposed to solely by user and metric name. | +| config.​frontend.​log_queries_longer_than | string | `"10s"` | | +| config.​ingester.​lifecycler.​final_sleep | string | `"30s"` | Duration to sleep for before exiting, to ensure metrics are scraped. | +| config.​ingester.​lifecycler.​join_after | string | `"10s"` | We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. It can take a while to have the full picture when using gossip | +| config.​ingester.​lifecycler.​num_tokens | int | `512` | | +| config.​ingester.​lifecycler.​observe_period | string | `"10s"` | To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, after putting their own tokens into it. 
This is only useful when using gossip, since multiple ingesters joining at the same time can have conflicting tokens if they don't see each other yet. | +| config.​ingester.​lifecycler.​ring.​kvstore.​store | string | `"memberlist"` | | +| config.​ingester.​lifecycler.​ring.​replication_factor | int | `3` | Ingester replication factor per default is 3 | +| config.​ingester_client.​grpc_client_config.​max_recv_msg_size | int | `10485760` | | +| config.​ingester_client.​grpc_client_config.​max_send_msg_size | int | `10485760` | | +| config.​limits.​enforce_metric_name | bool | `true` | Enforce that every sample has a metric name | +| config.​limits.​max_query_lookback | string | `"0s"` | | +| config.​limits.​reject_old_samples | bool | `true` | | +| config.​limits.​reject_old_samples_max_age | string | `"168h"` | | +| config.​memberlist.​bind_port | int | `7946` | | +| config.​memberlist.​join_members | list | `["{{ include \"cortex.fullname\" $ }}-memberlist"]` | the service name of the memberlist if using memberlist discovery | +| config.​querier.​active_query_tracker_dir | string | `"/data/active-query-tracker"` | | +| config.​querier.​query_ingesters_within | string | `"13h"` | Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. Ingesters by default have no data older than 12 hours, so we can safely set this 13 hours | +| config.​querier.​query_store_after | string | `"12h"` | The time after which a metric should be queried from storage and not just ingesters. | +| config.​querier.​store_gateway_addresses | string | automatic | Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should is set automatically when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring). 
| +| config.​query_range.​align_queries_with_step | bool | `true` | | +| config.​query_range.​cache_results | bool | `true` | | +| config.​query_range.​results_cache.​cache.​memcached.​expiration | string | `"1h"` | | +| config.​query_range.​results_cache.​cache.​memcached_client.​timeout | string | `"1s"` | | +| config.​query_range.​split_queries_by_interval | string | `"24h"` | | +| config.​ruler.​enable_alertmanager_discovery | bool | `false` | | +| config.​ruler.​enable_api | bool | `true` | Enable the experimental ruler config api. | +| config.​ruler.​storage | object | `{}` | Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config | +| config.​runtime_config.​file | string | `"/etc/cortex-runtime-config/runtime_config.yaml"` | | +| config.​server.​grpc_listen_port | int | `9095` | | +| config.​server.​grpc_server_max_concurrent_streams | int | `10000` | | +| config.​server.​grpc_server_max_recv_msg_size | int | `10485760` | | +| config.​server.​grpc_server_max_send_msg_size | int | `10485760` | | +| config.​server.​http_listen_port | int | `8080` | | +| config.​storage | object | `{"engine":"blocks","index_queries_cache_config":{"memcached":{"expiration":"1h"},"memcached_client":{"timeout":"1s"}}}` | See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config | +| config.​storage.​index_queries_cache_config.​memcached.​expiration | string | `"1h"` | How long keys stay in the memcache | +| config.​storage.​index_queries_cache_config.​memcached_client.​timeout | string | `"1s"` | Maximum time to wait before giving up on memcached requests. 
| +| config.​store_gateway | object | `{"sharding_enabled":false}` | https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config | +| configs.​affinity | object | `{}` | | +| configs.​annotations | object | `{}` | | +| configs.​containerSecurityContext.​enabled | bool | `true` | | +| configs.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| configs.​enabled | bool | `false` | | +| configs.​env | list | `[]` | | +| configs.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| configs.​extraContainers | list | `[]` | | +| configs.​extraPorts | list | `[]` | | +| configs.​extraVolumeMounts | list | `[]` | | +| configs.​extraVolumes | list | `[]` | | +| configs.​initContainers | list | `[]` | | +| configs.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​nodeSelector | object | `{}` | | +| configs.​persistentVolume.​subPath | string | `nil` | | +| configs.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| configs.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| configs.​podLabels | object | `{}` | Pod Labels | +| configs.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​replicas | int | `1` | | +| configs.​resources | object | `{}` | | +| configs.​securityContext | object | `{}` | | +| configs.​service.​annotations | object | `{}` | | +| configs.​service.​labels | object | `{}` | | +| configs.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| configs.​serviceMonitor.​additionalLabels | object | `{}` | | +| configs.​serviceMonitor.​enabled | bool | `false` | | +| configs.​serviceMonitor.​extraEndpointSpec | object | `{}` | 
Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| configs.​serviceMonitor.​metricRelabelings | list | `[]` | | +| configs.​serviceMonitor.​relabelings | list | `[]` | | +| configs.​startupProbe.​failureThreshold | int | `10` | | +| configs.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| configs.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| configs.​strategy.​type | string | `"RollingUpdate"` | | +| configs.​terminationGracePeriodSeconds | int | `180` | | +| configs.​tolerations | list | `[]` | | +| configsdb_postgresql.​auth.​existing_secret.​key | string | `nil` | | +| configsdb_postgresql.​auth.​existing_secret.​name | string | `nil` | | +| configsdb_postgresql.​auth.​password | string | `nil` | | +| configsdb_postgresql.​enabled | bool | `false` | | +| configsdb_postgresql.​uri | string | `nil` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"distributor"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| distributor.​annotations | object | `{}` | | +| distributor.​autoscaling.​behavior 
| object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| distributor.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the distributor pods. | +| distributor.​autoscaling.​maxReplicas | int | `30` | | +| distributor.​autoscaling.​minReplicas | int | `2` | | +| distributor.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| distributor.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| distributor.​containerSecurityContext.​enabled | bool | `true` | | +| distributor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| distributor.​env | list | `[]` | | +| distributor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| distributor.​extraContainers | list | `[]` | | +| distributor.​extraPorts | list | `[]` | | +| distributor.​extraVolumeMounts | list | `[]` | | +| distributor.​extraVolumes | list | `[]` | | +| distributor.​initContainers | list | `[]` | | +| distributor.​lifecycle | object | `{}` | | +| distributor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​nodeSelector | object | `{}` | | +| distributor.​persistentVolume.​subPath | string | `nil` | | +| distributor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| distributor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| distributor.​podLabels | object | `{}` | Pod Labels | +| distributor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​replicas | int | `2` | | +| distributor.​resources | object | `{}` | | +| distributor.​securityContext | object | `{}` | | +| distributor.​service.​annotations | object | `{}` | | +| 
distributor.​service.​labels | object | `{}` | | +| distributor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| distributor.​serviceMonitor.​additionalLabels | object | `{}` | | +| distributor.​serviceMonitor.​enabled | bool | `false` | | +| distributor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| distributor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| distributor.​serviceMonitor.​relabelings | list | `[]` | | +| distributor.​startupProbe.​failureThreshold | int | `10` | | +| distributor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| distributor.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| distributor.​strategy.​type | string | `"RollingUpdate"` | | +| distributor.​terminationGracePeriodSeconds | int | `60` | | +| distributor.​tolerations | list | `[]` | | +| externalConfigSecretName | string | `"secret-with-config.yaml"` | | +| externalConfigVersion | string | `"0"` | | +| image.​pullPolicy | string | `"IfNotPresent"` | | +| image.​pullSecrets | list | `[]` | Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| image.​repository | string | `"quay.io/cortexproject/cortex"` | | +| image.​tag | string | `""` | Allows you to override the cortex version in this chart. Use at your own risk. 
| +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"ingester"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| ingester.​annotations | object | `{}` | | +| ingester.​autoscaling.​behavior.​scaleDown.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details | +| ingester.​autoscaling.​behavior.​scaleDown.​stabilizationWindowSeconds | int | `3600` | uses metrics from the past 1h to make scaleDown decisions | +| ingester.​autoscaling.​behavior.​scaleUp.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | This default scaleup policy allows adding 1 pod every 30 minutes. 
Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| ingester.​autoscaling.​enabled | bool | `false` | | +| ingester.​autoscaling.​maxReplicas | int | `30` | | +| ingester.​autoscaling.​minReplicas | int | `3` | | +| ingester.​autoscaling.​targetMemoryUtilizationPercentage | int | `80` | | +| ingester.​containerSecurityContext.​enabled | bool | `true` | | +| ingester.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ingester.​env | list | `[]` | | +| ingester.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| ingester.​extraContainers | list | `[]` | | +| ingester.​extraPorts | list | `[]` | | +| ingester.​extraVolumeMounts | list | `[]` | | +| ingester.​extraVolumes | list | `[]` | | +| ingester.​initContainers | list | `[]` | | +| ingester.​lifecycle.​preStop | object | `{"httpGet":{"path":"/ingester/shutdown","port":"http-metrics"}}` | The /shutdown preStop hook is recommended as part of the ingester scaledown process, but can be removed to optimize rolling restarts in instances that will never be scaled down or when using chunks storage with WAL disabled. https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down | +| ingester.​livenessProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. 
Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​nodeSelector | object | `{}` | | +| ingester.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Ingester data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| ingester.​persistentVolume.​annotations | object | `{}` | Ingester data Persistent Volume Claim annotations | +| ingester.​persistentVolume.​enabled | bool | `true` | If true and ingester.statefulSet.enabled is true, Ingester will create/use a Persistent Volume Claim If false, use emptyDir | +| ingester.​persistentVolume.​size | string | `"2Gi"` | Ingester data Persistent Volume size | +| ingester.​persistentVolume.​storageClass | string | `nil` | Ingester data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| ingester.​persistentVolume.​subPath | string | `""` | Subdirectory of Ingester data Persistent Volume to mount Useful if the volume's root directory is not empty | +| ingester.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ingester.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ingester.​podLabels | object | `{}` | Pod Labels | +| ingester.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ingester.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ingester.​replicas | int | `3` | | +| ingester.​resources | object | `{}` | | +| ingester.​securityContext | object | `{}` | | +| ingester.​service.​annotations | object | `{}` | | +| ingester.​service.​labels | object | `{}` | | +| ingester.​serviceAccount.​name | string | `nil` | | +| ingester.​serviceMonitor.​additionalLabels | object | `{}` | | +| ingester.​serviceMonitor.​enabled | bool | `false` | | +| ingester.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ingester.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ingester.​serviceMonitor.​relabelings | list | `[]` | | +| ingester.​startupProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. 
This is useful when using WAL | +| ingester.​statefulSet.​podManagementPolicy | string | `"OrderedReady"` | ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details | +| ingester.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| ingester.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ingester.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ingester.​strategy.​type | string | `"RollingUpdate"` | | +| ingester.​terminationGracePeriodSeconds | int | `240` | | +| ingester.​tolerations | list | `[]` | | +| ingress.​annotations | object | `{}` | | +| ingress.​enabled | bool | `false` | | +| ingress.​hosts[0].​host | string | `"chart-example.local"` | | +| ingress.​hosts[0].​paths[0] | string | `"/"` | | +| ingress.​ingressClass.​enabled | bool | `false` | | +| ingress.​ingressClass.​name | string | `"nginx"` | | +| ingress.​tls | list | `[]` | | +| memcached | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | chunk caching for legacy chunk storage engine | +| memcached-blocks-index.​architecture | string | `"high-availability"` | | +| memcached-blocks-index.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-index.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-index.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is 
the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached-blocks-index.​metrics.​enabled | bool | `true` | | +| memcached-blocks-index.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-index.​replicaCount | int | `2` | | +| memcached-blocks-index.​resources | object | `{}` | | +| memcached-blocks-metadata.​architecture | string | `"high-availability"` | | +| memcached-blocks-metadata.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-metadata.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-metadata.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks-metadata.​metrics.​enabled | bool | `true` | | +| memcached-blocks-metadata.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-metadata.​replicaCount | int | `2` | | +| memcached-blocks-metadata.​resources | object | `{}` | | +| memcached-blocks.​architecture | string | `"high-availability"` | | +| memcached-blocks.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks.​metrics.​enabled | bool | `true` | | +| memcached-blocks.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks.​replicaCount | int | `2` | | +| memcached-blocks.​resources | object | `{}` | | +| memcached-frontend.​architecture | string | `"high-availability"` | | +| memcached-frontend.​enabled | bool | `false` | | +| memcached-frontend.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-frontend.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-frontend.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-frontend.​metrics.​enabled | bool | `true` | | +| memcached-frontend.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-frontend.​replicaCount | int | `2` | | +| memcached-frontend.​resources | object | `{}` | | +| memcached-index-read | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index read caching for legacy chunk storage engine | +| memcached-index-read.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-read.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-read.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-index-write | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index write caching for legacy chunk storage engine | +| memcached-index-write.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-write.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-write.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| nginx.​affinity | object | `{}` | | +| nginx.​annotations | object | `{}` | | +| nginx.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| nginx.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the nginx pods. | +| nginx.​autoscaling.​maxReplicas | int | `30` | | +| nginx.​autoscaling.​minReplicas | int | `2` | | +| nginx.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| nginx.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| nginx.​config.​auth_orgs | list | `[]` | (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config | +| nginx.​config.​basicAuthSecretName | string | `""` | (optional) Name of basic auth secret. In order to use this option, a secret with htpasswd formatted contents at the key ".htpasswd" must exist. For example: apiVersion: v1 kind: Secret metadata: name: my-secret namespace: stringData: .htpasswd: | user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ Please note that the use of basic auth will not identify organizations the way X-Scope-OrgID does. Thus, the use of basic auth alone will not prevent one tenant from viewing the metrics of another. To ensure tenants are scoped appropriately, explicitly set the `X-Scope-OrgID` header in the nginx config. 
Example setHeaders: X-Scope-OrgID: $remote_user | +| nginx.​config.​client_max_body_size | string | `"1M"` | ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size | +| nginx.​config.​dnsResolver | string | `"coredns.kube-system.svc.cluster.local"` | | +| nginx.​config.​httpSnippet | string | `""` | arbitrary snippet to inject in the http { } section of the nginx config | +| nginx.​config.​mainSnippet | string | `""` | arbitrary snippet to inject in the top section of the nginx config | +| nginx.​config.​serverSnippet | string | `""` | arbitrary snippet to inject in the server { } section of the nginx config | +| nginx.​config.​setHeaders | object | `{}` | | +| nginx.​containerSecurityContext.​enabled | bool | `true` | | +| nginx.​containerSecurityContext.​readOnlyRootFilesystem | bool | `false` | | +| nginx.​enabled | bool | `true` | | +| nginx.​env | list | `[]` | | +| nginx.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| nginx.​extraContainers | list | `[]` | | +| nginx.​extraPorts | list | `[]` | | +| nginx.​extraVolumeMounts | list | `[]` | | +| nginx.​extraVolumes | list | `[]` | | +| nginx.​http_listen_port | int | `80` | | +| nginx.​image.​pullPolicy | string | `"IfNotPresent"` | | +| nginx.​image.​repository | string | `"nginx"` | | +| nginx.​image.​tag | float | `1.21` | | +| nginx.​initContainers | list | `[]` | | +| nginx.​livenessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​nodeSelector | object | `{}` | | +| nginx.​persistentVolume.​subPath | string | `nil` | | +| nginx.​podAnnotations | object | `{}` | Pod Annotations | +| nginx.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| nginx.​podLabels | object | `{}` | Pod Labels | +| nginx.​readinessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| 
nginx.​replicas | int | `2` | | +| nginx.​resources | object | `{}` | | +| nginx.​securityContext | object | `{}` | | +| nginx.​service.​annotations | object | `{}` | | +| nginx.​service.​labels | object | `{}` | | +| nginx.​service.​type | string | `"ClusterIP"` | | +| nginx.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| nginx.​startupProbe.​failureThreshold | int | `10` | | +| nginx.​startupProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| nginx.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| nginx.​strategy.​type | string | `"RollingUpdate"` | | +| nginx.​terminationGracePeriodSeconds | int | `10` | | +| nginx.​tolerations | list | `[]` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"querier"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| querier.​annotations | object | `{}` | | +| querier.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| querier.​autoscaling.​enabled | bool | `false` | Creates 
a HorizontalPodAutoscaler for the querier pods. | +| querier.​autoscaling.​maxReplicas | int | `30` | | +| querier.​autoscaling.​minReplicas | int | `2` | | +| querier.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| querier.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| querier.​containerSecurityContext.​enabled | bool | `true` | | +| querier.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| querier.​env | list | `[]` | | +| querier.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| querier.​extraContainers | list | `[]` | | +| querier.​extraPorts | list | `[]` | | +| querier.​extraVolumeMounts | list | `[]` | | +| querier.​extraVolumes | list | `[]` | | +| querier.​initContainers | list | `[]` | | +| querier.​lifecycle | object | `{}` | | +| querier.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​nodeSelector | object | `{}` | | +| querier.​persistentVolume.​subPath | string | `nil` | | +| querier.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| querier.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| querier.​podLabels | object | `{}` | Pod Labels | +| querier.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​replicas | int | `2` | | +| querier.​resources | object | `{}` | | +| querier.​securityContext | object | `{}` | | +| querier.​service.​annotations | object | `{}` | | +| querier.​service.​labels | object | `{}` | | +| querier.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| querier.​serviceMonitor.​additionalLabels | object | `{}` | | +| querier.​serviceMonitor.​enabled | bool | `false` | | +| 
querier.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| querier.​serviceMonitor.​metricRelabelings | list | `[]` | | +| querier.​serviceMonitor.​relabelings | list | `[]` | | +| querier.​startupProbe.​failureThreshold | int | `10` | | +| querier.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| querier.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| querier.​strategy.​type | string | `"RollingUpdate"` | | +| querier.​terminationGracePeriodSeconds | int | `180` | | +| querier.​tolerations | list | `[]` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"query-frontend"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| query_frontend.​annotations | object | `{}` | | +| query_frontend.​containerSecurityContext.​enabled | bool | `true` | | +| query_frontend.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| query_frontend.​env | list | `[]` | | +| query_frontend.​extraArgs | object | `{}` | Additional Cortex 
container arguments, e.g. log.level (debug, info, warn, error) | +| query_frontend.​extraContainers | list | `[]` | | +| query_frontend.​extraPorts | list | `[]` | | +| query_frontend.​extraVolumeMounts | list | `[]` | | +| query_frontend.​extraVolumes | list | `[]` | | +| query_frontend.​initContainers | list | `[]` | | +| query_frontend.​lifecycle | object | `{}` | | +| query_frontend.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​nodeSelector | object | `{}` | | +| query_frontend.​persistentVolume.​subPath | string | `nil` | | +| query_frontend.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| query_frontend.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| query_frontend.​podLabels | object | `{}` | Pod Labels | +| query_frontend.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​replicas | int | `2` | | +| query_frontend.​resources | object | `{}` | | +| query_frontend.​securityContext | object | `{}` | | +| query_frontend.​service.​annotations | object | `{}` | | +| query_frontend.​service.​labels | object | `{}` | | +| query_frontend.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| query_frontend.​serviceMonitor.​additionalLabels | object | `{}` | | +| query_frontend.​serviceMonitor.​enabled | bool | `false` | | +| query_frontend.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| query_frontend.​serviceMonitor.​metricRelabelings | list | `[]` | | +| query_frontend.​serviceMonitor.​relabelings | list | `[]` | | +| 
query_frontend.​startupProbe.​failureThreshold | int | `10` | | +| query_frontend.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| query_frontend.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| query_frontend.​strategy.​type | string | `"RollingUpdate"` | | +| query_frontend.​terminationGracePeriodSeconds | int | `180` | | +| query_frontend.​tolerations | list | `[]` | | +| ruler.​affinity | object | `{}` | | +| ruler.​annotations | object | `{}` | | +| ruler.​containerSecurityContext.​enabled | bool | `true` | | +| ruler.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ruler.​directories | object | `{}` | allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html | +| ruler.​enabled | bool | `true` | | +| ruler.​env | list | `[]` | | +| ruler.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) | +| ruler.​extraContainers | list | `[]` | | +| ruler.​extraPorts | list | `[]` | | +| ruler.​extraVolumeMounts | list | `[]` | | +| ruler.​extraVolumes | list | `[]` | | +| ruler.​initContainers | list | `[]` | | +| ruler.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​nodeSelector | object | `{}` | | +| ruler.​persistentVolume.​subPath | string | `nil` | | +| ruler.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ruler.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ruler.​podLabels | object | `{}` | Pod Labels | +| ruler.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​replicas | int | `1` | | +| ruler.​resources | object | `{}` | | +| ruler.​securityContext | object | `{}` | | +| ruler.​service.​annotations | object | `{}` | | +| ruler.​service.​labels | object | `{}` | | +| ruler.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| ruler.​serviceMonitor.​additionalLabels | object | `{}` | | +| ruler.​serviceMonitor.​enabled | bool | `false` | | +| ruler.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ruler.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ruler.​serviceMonitor.​relabelings | list | `[]` | | +| ruler.​sidecar | object | 
`{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/tmp/rules","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_rules","labelValue":null,"resources":{},"searchNamespace":null,"watchMethod":null}` | Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders | +| ruler.​sidecar.​defaultFolderName | string | `nil` | The default folder name, it will create a subfolder under the `folder` and put rules in there instead | +| ruler.​sidecar.​folder | string | `"/tmp/rules"` | folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) | +| ruler.​sidecar.​folderAnnotation | string | `nil` | If specified, the sidecar will look for annotation with this name to create folder and put graph here. You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. | +| ruler.​sidecar.​label | string | `"cortex_rules"` | label that the configmaps with rules are marked with | +| ruler.​sidecar.​labelValue | string | `nil` | value of label that the configmaps with rules are set to | +| ruler.​sidecar.​searchNamespace | string | `nil` | If specified, the sidecar will search for rules config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | +| ruler.​startupProbe.​failureThreshold | int | `10` | | +| ruler.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ruler.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ruler.​strategy.​type | string | `"RollingUpdate"` | | +| ruler.​terminationGracePeriodSeconds | int | `180` | | +| ruler.​tolerations | list | `[]` | | +| runtimeconfigmap.​annotations | object | `{}` | | +| runtimeconfigmap.​create | bool | `true` | If true, a configmap for the `runtime_config` will be created. If false, the configmap _must_ exist already on the cluster or pods will fail to create. | +| runtimeconfigmap.​runtime_config | object | `{}` | https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file | +| serviceAccount.​annotations | object | `{}` | | +| serviceAccount.​automountServiceAccountToken | bool | `true` | | +| serviceAccount.​create | bool | `true` | | +| serviceAccount.​name | string | `nil` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"store-gateway"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` 
| | +| store_gateway.​annotations | object | `{}` | | +| store_gateway.​containerSecurityContext.​enabled | bool | `true` | | +| store_gateway.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| store_gateway.​env | list | `[]` | | +| store_gateway.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| store_gateway.​extraContainers | list | `[]` | | +| store_gateway.​extraPorts | list | `[]` | | +| store_gateway.​extraVolumeMounts | list | `[]` | | +| store_gateway.​extraVolumes | list | `[]` | | +| store_gateway.​initContainers | list | `[]` | | +| store_gateway.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​nodeSelector | object | `{}` | | +| store_gateway.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Store-gateway data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| store_gateway.​persistentVolume.​annotations | object | `{}` | Store-gateway data Persistent Volume Claim annotations | +| store_gateway.​persistentVolume.​enabled | bool | `true` | If true Store-gateway will create/use a Persistent Volume Claim If false, use emptyDir | +| store_gateway.​persistentVolume.​size | string | `"2Gi"` | Store-gateway data Persistent Volume size | +| store_gateway.​persistentVolume.​storageClass | string | `nil` | Store-gateway data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| store_gateway.​persistentVolume.​subPath | string | `""` | Subdirectory of Store-gateway data Persistent Volume to mount Useful if the volume's root directory is not empty | +| store_gateway.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| store_gateway.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| store_gateway.​podLabels | object | `{}` | Pod Labels | +| store_gateway.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​replicas | int | `1` | | +| store_gateway.​resources | object | `{}` | | +| store_gateway.​securityContext | object | `{}` | | +| store_gateway.​service.​annotations | object | `{}` | | +| store_gateway.​service.​labels | object | `{}` | | +| store_gateway.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| store_gateway.​serviceMonitor.​additionalLabels | object | `{}` | | +| store_gateway.​serviceMonitor.​enabled | bool | `false` | | +| store_gateway.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| store_gateway.​serviceMonitor.​metricRelabelings | list | `[]` | | +| store_gateway.​serviceMonitor.​relabelings | list | `[]` | | +| store_gateway.​startupProbe.​failureThreshold | int | `60` | | +| store_gateway.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​startupProbe.​initialDelaySeconds | int | `120` | | +| store_gateway.​startupProbe.​periodSeconds | int | `30` | | +| store_gateway.​strategy.​type | string | `"RollingUpdate"` | | +| 
store_gateway.​terminationGracePeriodSeconds | int | `240` | | +| store_gateway.​tolerations | list | `[]` | | +| table_manager.​affinity | object | `{}` | | +| table_manager.​annotations | object | `{}` | | +| table_manager.​containerSecurityContext.​enabled | bool | `true` | | +| table_manager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| table_manager.​env | list | `[]` | | +| table_manager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| table_manager.​extraContainers | list | `[]` | | +| table_manager.​extraPorts | list | `[]` | | +| table_manager.​extraVolumeMounts | list | `[]` | | +| table_manager.​extraVolumes | list | `[]` | | +| table_manager.​initContainers | list | `[]` | | +| table_manager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​nodeSelector | object | `{}` | | +| table_manager.​persistentVolume.​subPath | string | `nil` | | +| table_manager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| table_manager.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| table_manager.​podLabels | object | `{}` | Pod Labels | +| table_manager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​replicas | int | `1` | | +| table_manager.​resources | object | `{}` | | +| table_manager.​securityContext | object | `{}` | | +| table_manager.​service.​annotations | object | `{}` | | +| table_manager.​service.​labels | object | `{}` | | +| table_manager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| table_manager.​serviceMonitor.​additionalLabels | object | `{}` | | +| table_manager.​serviceMonitor.​enabled | bool 
| `false` | | +| table_manager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| table_manager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| table_manager.​serviceMonitor.​relabelings | list | `[]` | | +| table_manager.​startupProbe.​failureThreshold | int | `10` | | +| table_manager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| table_manager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| table_manager.​strategy.​type | string | `"RollingUpdate"` | | +| table_manager.​terminationGracePeriodSeconds | int | `180` | | +| table_manager.​tolerations | list | `[]` | | +| tags.​blocks-storage-memcached | bool | `false` | Set to true to enable block storage memcached caching | +| useConfigMap | bool | `false` | | +| useExternalConfig | bool | `false` | | + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt new file mode 100644 index 0000000..1bd3203 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{- if eq .Values.config.storage.engine "chunks" }} +Cortex chunks storage has been deprecated, and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. +No new features will be added to the chunks storage. +Unlike the official cortex default configuration this helm-chart does not run the chunk engine by default. +{{- end }} + +Verify the application is working by running these commands: + kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "cortex.querierFullname" . 
}} {{ .Values.config.server.http_listen_port }} + curl http://127.0.0.1:{{ .Values.config.server.http_listen_port }}/services diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl new file mode 100644 index 0000000..81914c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cortex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cortex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cortex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cortex.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cortex.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the app name of cortex clients. 
Defaults to the same logic as "cortex.fullname", and default client expects "prometheus". +*/}} +{{- define "client.name" -}} +{{- if .Values.client.name -}} +{{- .Values.client.name -}} +{{- else if .Values.client.fullnameOverride -}} +{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "prometheus" .Values.client.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "cortex.labels" -}} +helm.sh/chart: {{ include "cortex.chart" . }} +{{ include "cortex.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cortex.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cortex.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create configuration parameters for memcached configuration +*/}} +{{- define "cortex.memcached" -}} +{{- if and (eq .Values.config.storage.engine "blocks") (index .Values "tags" "blocks-storage-memcached") }} +- "-blocks-storage.bucket-store.index-cache.backend=memcached" +- "-blocks-storage.bucket-store.index-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-index.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.chunks-cache.backend=memcached" +- "-blocks-storage.bucket-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.metadata-cache.backend=memcached" +- "-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-metadata.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") .Values.memcached.enabled }} +- "-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-read" "enabled") }} +- "-store.index-cache-read.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-read.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-write" "enabled") }} +- "-store.index-cache-write.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-write.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Create configuration for frontend memcached configuration +*/}} +{{- define "cortex.frontend-memcached" -}} +{{- if index .Values "memcached-frontend" 
"enabled" }} +- "-frontend.memcached.addresses=dns+{{ template "cortex.fullname" . }}-memcached-frontend.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Determine the policy api version +*/}} +{{- define "cortex.pdbVersion" -}} +{{- if or (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") (semverCompare ">=1.21" .Capabilities.KubeVersion.Version) -}} +policy/v1 +{{- else -}} +policy/v1beta1 +{{- end -}} +{{- end -}} + +{{/* +Get checksum of config secret or configMap +*/}} +{{- define "cortex.configChecksum" -}} +{{- if .Values.useExternalConfig -}} +{{- .Values.externalConfigVersion -}} +{{- else if .Values.useConfigMap -}} +{{- include (print $.Template.BasePath "/configmap.yaml") . | sha256sum -}} +{{- else -}} +{{- include (print $.Template.BasePath "/secret.yaml") . | sha256sum -}} +{{- end -}} +{{- end -}} + +{{/* +Get volume of config secret of configMap +*/}} +{{- define "cortex.configVolume" -}} +- name: config + {{- if .Values.useExternalConfig }} + secret: + secretName: {{ .Values.externalConfigSecretName }} + {{- else if .Values.useConfigMap }} + configMap: + name: {{ template "cortex.fullname" . }}-config + {{- else }} + secret: + secretName: {{ template "cortex.fullname" . 
}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml new file mode 100644 index 0000000..49c4ca7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alertmanager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + name: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - name: alertmanager +# image: quay.io/cortexproject/cortex:v1.9.0 +# image: registry.cloud.intermax:5000/library/cortex:v1.11.0 + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cortex:v1.11.0 + imagePullPolicy: IfNotPresent + args: + - -target=alertmanager +# - -log.level=debug + - -server.http-listen-port=80 + - -alertmanager.configs.url=http://{{ template "cortex.fullname" . 
}}-configs:8080 + - -alertmanager.web.external-url=/alertmanager + ports: + - containerPort: 80 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 0000000..989feb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: alertmanager +spec: + ports: + - port: 80 + selector: + name: alertmanager diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml new file mode 100644 index 0000000..cf7f25a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cortex.fullname" . }}-clusterrole + labels: + {{- include "cortex.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..c1d9884 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cortex.fullname" . }}-clusterrolebinding + labels: + {{- include "cortex.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cortex.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 0000000..f89b33c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,23 @@ + +{{/* +compactor fullname +*/}} +{{- define "cortex.compactorFullname" -}} +{{ include "cortex.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "cortex.compactorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "cortex.compactorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: compactor +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml new file mode 100644 index 0000000..8634e4c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.compactor.replicas) 1) (.Values.compactor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.compactor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml new file mode 100644 index 0000000..a33e849 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.compactor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} + {{- if .Values.compactor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.compactor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.compactor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.compactor.serviceMonitor.interval }} + interval: {{ .Values.compactor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.compactor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.compactor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.compactor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.compactor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 0000000..c0a1baf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,141 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.compactor.annotations | nindent 4 }} +spec: + replicas: {{ .Values.compactor.replicas }} + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.compactor.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-compactor + {{- if .Values.compactor.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.compactor.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.compactor.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.compactor.persistentVolume.storageClass }} + {{- if (eq "-" .Values.compactor.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.compactor.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.compactor.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.compactor.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.compactorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.compactor.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.compactor.priorityClassName }} + priorityClassName: {{ .Values.compactor.priorityClassName }} + {{- end }} + {{- if .Values.compactor.securityContext.enabled }} + securityContext: {{- omit .Values.compactor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.compactor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.compactor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.compactor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.compactor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.compactor.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.compactor.extraVolumes }} + {{- toYaml .Values.compactor.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.compactor.extraContainers }} + {{ toYaml .Values.compactor.extraContainers | nindent 8 }} + {{- end }} + - name: compactor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=compactor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.compactor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.compactor.extraVolumeMounts }} + {{- toYaml .Values.compactor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.compactor.persistentVolume.subPath }} + subPath: {{ .Values.compactor.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.compactor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.compactor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.compactor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.compactor.resources | nindent 12 }} + {{- if .Values.compactor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.compactor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.compactor.env }} + env: + {{- toYaml .Values.compactor.env | nindent 12 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml new file mode 100644 index 0000000..ae20f78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml @@ -0,0 +1,25 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.compactorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- with .Values.compactor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.compactor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.compactorSelectorLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml new file mode 100644 index 0000000..001b13a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (and (not .Values.useExternalConfig) (.Values.useConfigMap)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: | + {{- tpl (toYaml .Values.config) . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl new file mode 100644 index 0000000..c8945dc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl @@ -0,0 +1,23 @@ + +{{/* +configs fullname +*/}} +{{- define "cortex.configsFullname" -}} +{{ include "cortex.fullname" . }}-configs +{{- end }} + +{{/* +configs common labels +*/}} +{{- define "cortex.configsLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: configs +{{- end }} + +{{/* +configs selector labels +*/}} +{{- define "cortex.configsSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: configs +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml new file mode 100644 index 0000000..86048ce --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml @@ -0,0 +1,124 @@ +{{- if .Values.configs.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.configs.annotations | nindent 4 }} +spec: + replicas: {{ .Values.configs.replicas }} + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.configs.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.configsLabels" . | nindent 8 }} + {{- with .Values.configs.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.configs.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.configs.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.configs.priorityClassName }} + priorityClassName: {{ .Values.configs.priorityClassName }} + {{- end }} + {{- if .Values.configs.securityContext.enabled }} + securityContext: {{- omit .Values.configs.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.configs.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: configs + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=configs" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configsdb_postgresql.enabled }} + - "-configs.database.uri={{ .Values.configsdb_postgresql.uri }}" + - "-configs.database.password-file=/etc/postgresql/password" + - "-configs.database.migrations-dir=/migrations" + {{- else }} + - "-configs.database.uri=memory://" + {{- end }} + {{- range $key, $value := .Values.configs.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/cortex + subPath: {{ .Values.configs.persistentVolume.subPath }} + - name: runtime-config + mountPath: /etc/cortex-runtime-config + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + mountPath: /etc/postgresql + {{- end }} + {{- if .Values.configs.extraVolumeMounts }} + {{- toYaml .Values.configs.extraVolumeMounts | nindent 12}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.configs.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.configs.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.configs.readinessProbe | nindent 12 }} + 
resources: + {{- toYaml .Values.configs.resources | nindent 12 }} + {{- if .Values.configs.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.configs.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.configs.env }} + env: + {{- toYaml .Values.configs.env | nindent 12 }} + {{- end }} + {{- if .Values.configs.extraContainers }} + {{- toYaml .Values.configs.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.configs.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.configs.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.configs.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + secret: + secretName: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.name }}{{ else }}{{ template "cortex.fullname" . }}-postgresql{{ end }} + items: + - key: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.key }}{{ else }}postgresql-password{{ end }} + path: password + {{- end }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.configs.extraVolumes }} + {{- toYaml .Values.configs.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml new file mode 100644 index 0000000..b6e46b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.configs.replicas) 1) (.Values.configs.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.configs.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml new file mode 100644 index 0000000..393bc32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.configs.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . 
| nindent 4 }} + {{- if .Values.configs.serviceMonitor.additionalLabels }} +{{ toYaml .Values.configs.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.configs.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.configs.serviceMonitor.interval }} + interval: {{ .Values.configs.serviceMonitor.interval }} + {{- end }} + {{- if .Values.configs.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.configs.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.configs.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.configs.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.configs.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.configs.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml new file mode 100644 index 0000000..6dbc2cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.configs.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + {{- with .Values.configs.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.configs.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.configsSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml new file mode 100644 index 0000000..472f83e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-0 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH1 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-1 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH2 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-2 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ 
.Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH3 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 0000000..24e8d00 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,23 @@ + +{{/* +distributor fullname +*/}} +{{- define "cortex.distributorFullname" -}} +{{ include "cortex.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "cortex.distributorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "cortex.distributorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml new file mode 100644 index 0000000..fc9c0ba --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.distributor.annotations | nindent 4 }} +spec: + {{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.distributor.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.distributorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.distributor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.distributor.priorityClassName }} + priorityClassName: {{ .Values.distributor.priorityClassName }} + {{- end }} + {{- if .Values.distributor.securityContext.enabled }} + securityContext: {{- omit .Values.distributor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.distributor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: distributor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=distributor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.distributor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.distributor.extraVolumeMounts }} + {{- toYaml .Values.distributor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.distributor.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.distributor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.distributor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.distributor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.distributor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.distributor.env }} + env: + {{- toYaml .Values.distributor.env | nindent 12 }} + {{- end }} + {{- with .Values.distributor.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.distributor.extraContainers }} + {{- toYaml .Values.distributor.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.distributor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.distributor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.distributor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.distributor.extraVolumes }} + {{- toYaml .Values.distributor.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml new file mode 100644 index 0000000..0c1c9f6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.distributor.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.distributorFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.distributorFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml new file mode 100644 index 0000000..7b05701 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.distributor.replicas) 1) (.Values.distributor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.distributor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml new file mode 100644 index 0000000..5db8389 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.distributor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.distributorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- if .Values.distributor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.distributor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.distributor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.distributor.serviceMonitor.interval }} + interval: {{ .Values.distributor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.distributor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.distributor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.distributor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.distributor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 0000000..1c4f7f6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . 
}}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml new file mode 100644 index 0000000..2db7197 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.distributorSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 0000000..4705327 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,23 @@ + +{{/* +ingester fullname +*/}} +{{- define "cortex.ingesterFullname" -}} +{{ include "cortex.fullname" . }}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "cortex.ingesterLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "cortex.ingesterSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ingester +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml new file mode 100644 index 0000000..b26d3a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml @@ -0,0 +1,130 @@ +{{- if not .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.ingester.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.ingester.env }} + {{ toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- with .Values.ingester.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml new file mode 100644 index 0000000..97c5290 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml @@ -0,0 +1,29 @@ +{{- with .Values.ingester.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.ingesterFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ if $.Values.ingester.statefulSet.enabled }}StatefulSet{{ else }}Deployment{{ end }} + name: {{ include "cortex.ingesterFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .targetMemoryUtilizationPercentage }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml new file mode 100644 index 0000000..a47ecb4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ingester.replicas) 1) (.Values.ingester.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ingester.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml new file mode 100644 index 0000000..310ca54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingester.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- if .Values.ingester.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ingester.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ingester.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ingester.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 0000000..8016441 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,153 @@ +{{- if .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + updateStrategy: + {{- toYaml .Values.ingester.statefulStrategy | nindent 4 }} + podManagementPolicy: "{{ .Values.ingester.statefulSet.podManagementPolicy }}" + serviceName: {{ template "cortex.fullname" . }}-ingester-headless + {{- if .Values.ingester.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.ingester.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.ingester.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.ingester.persistentVolume.storageClass }} + {{- if (eq "-" .Values.ingester.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.ingester.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.ingester.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.ingester.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.ingester.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8 }} + {{- end }} + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ingester.env }} + env: + {{- toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 0000000..b783caa --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml new file mode 100644 index 0000000..02183ae --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl new file mode 100644 index 0000000..61d8b78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl @@ -0,0 +1,23 @@ + +{{/* +nginx fullname +*/}} +{{- define "cortex.nginxFullname" -}} +{{ include "cortex.fullname" . }}-nginx +{{- end }} + +{{/* +nginx common labels +*/}} +{{- define "cortex.nginxLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: nginx +{{- end }} + +{{/* +nginx selector labels +*/}} +{{- define "cortex.nginxSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: nginx +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml new file mode 100644 index 0000000..fd3474d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml @@ -0,0 +1,140 @@ +{{- if .Values.nginx.enabled }} +{{- $rootDomain := printf "%s.svc.%s:%d" .Release.Namespace .Values.clusterDomain (.Values.config.server.http_listen_port | int) }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +data: + nginx.conf: |- + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + {{- with .Values.nginx.config.mainSnippet }} + {{ tpl . $ | nindent 4 }} + {{- end }} + + http { + default_type application/octet-stream; + client_max_body_size {{.Values.nginx.config.client_max_body_size}}; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" $http_x_scope_orgid'; + access_log /dev/stderr main; + sendfile on; + tcp_nopush on; + resolver {{ default (printf "coredns.kube-system.svc.%s" .Values.clusterDomain ) .Values.nginx.config.dnsResolver }}; + + {{- with .Values.nginx.config.httpSnippet }} + {{ tpl . 
$ | nindent 6 }} + {{- end }} + + server { # simple reverse-proxy + listen {{ .Values.nginx.http_listen_port }}; + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + proxy_http_version 1.1; + proxy_set_header X-Scope-OrgID 0; + + {{- range $key, $value := .Values.nginx.config.setHeaders }} + proxy_set_header {{ $key }} {{ $value }}; + {{- end }} + + {{ if .Values.nginx.config.basicAuthSecretName -}} + auth_basic "Restricted Content"; + auth_basic_user_file /etc/apache2/.htpasswd; + {{- end }} + + {{- with .Values.nginx.config.serverSnippet }} + {{ tpl . $ | nindent 8 }} + {{- end }} + + location = /healthz { + # auth_basic off is not set here, even when a basic auth directive is + # included in the server block, as Nginx's NGX_HTTP_REWRITE_PHASE + # (point when this return statement is evaluated) comes before the + # NGX_HTTP_ACCESS_PHASE (point when basic auth is evaluated). Thus, + # this return statement returns a response before basic auth is + # evaluated. + return 200 'alive'; + } + + # Distributor Config + location = /ring { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /all_user_stats { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /api/prom/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + ## New Remote write API. Ref: https://cortexmetrics.io/docs/api/#remote-write + location = /api/v1/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + # Alertmanager Config + location ~ /api/prom/alertmanager/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /api/v1/alerts { + proxy_pass http://{{ template "cortex.fullname" . 
}}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /multitenant_alertmanager/status { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + # Ruler Config + location ~ /api/v1/rules { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + location ~ /ruler/ring { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + # Config Config + location ~ /api/prom/configs/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-configs.{{ $rootDomain }}$request_uri; + } + + # Query Config + location ~ /api/prom/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + ## New Query frontend APIs as per https://cortexmetrics.io/docs/api/#querier--query-frontend + location ~ ^{{.Values.config.api.prometheus_http_prefix}}/api/v1/(read|metadata|labels|series|query_range|query) { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + location ~ {{.Values.config.api.prometheus_http_prefix}}/api/v1/label/.* { + proxy_pass http://{{ template "cortex.fullname" . 
}}-query-frontend.{{ $rootDomain }}$request_uri; + } + {{- if and (.Values.config.auth_enabled) (.Values.nginx.config.auth_orgs) }} + # Auth orgs + {{- range $org := compact .Values.nginx.config.auth_orgs | uniq }} + location = /api/v1/push/{{ $org }} { + proxy_set_header X-Scope-OrgID {{ $org }}; + proxy_pass http://{{ template "cortex.fullname" $ }}-distributor.{{ $rootDomain }}/api/v1/push; + } + {{- end }} + {{- end }} + } + } +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml new file mode 100644 index 0000000..bbd3a9d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml @@ -0,0 +1,111 @@ +{{- if .Values.nginx.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.nginx.annotations | nindent 4 }} +spec: + {{- if not .Values.nginx.autoscaling.enabled }} + replicas: {{ .Values.nginx.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.nginx.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.nginxLabels" . | nindent 8 }} + {{- with .Values.nginx.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/nginx/nginx-config.yaml") . | sha256sum }} + {{- with .Values.nginx.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.nginx.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName }} + {{- end }} + {{- if .Values.nginx.securityContext.enabled }} + securityContext: {{- omit .Values.nginx.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.nginx.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + {{- if .Values.nginx.extraArgs }} + args: + {{- range $key, $value := .Values.nginx.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.nginx.extraVolumeMounts }} + {{- toYaml .Values.nginx.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + mountPath: /etc/apache2 + readOnly: true + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.nginx.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.nginx.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.nginx.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.nginx.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.nginx.resources | nindent 12 }} + {{- if .Values.nginx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.nginx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.nginx.env }} + env: + {{- toYaml .Values.nginx.env | nindent 12 }} + {{- end }} + {{- if .Values.nginx.extraContainers }} + {{ toYaml .Values.nginx.extraContainers | indent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.nginx.nodeSelector | nindent 8 }} + affinity: + {{- toYaml 
.Values.nginx.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.nginx.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.nginx.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "cortex.fullname" . }}-nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + secret: + defaultMode: 420 + secretName: {{ .Values.nginx.config.basicAuthSecretName }} + {{- end }} + {{- if .Values.nginx.extraVolumes }} + {{- toYaml .Values.nginx.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml new file mode 100644 index 0000000..b93a13d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.nginx.enabled .Values.nginx.autoscaling.enabled }} +{{- with .Values.nginx.autoscaling -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.nginxFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.nginxFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml new file mode 100644 index 0000000..51e6609 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.ingress.enabled .Values.nginx.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} +spec: +{{- if .Values.ingress.ingressClass.enabled }} + ingressClassName: {{ .Values.ingress.ingressClass.name }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + pathType: "Prefix" + backend: + service: + name: {{ include "cortex.nginxFullname" $ }} + port: + number: {{ $.Values.nginx.http_listen_port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml new file mode 100644 index 0000000..959764a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.nginx.enabled) (gt (int .Values.nginx.replicas) 1) (.Values.nginx.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.nginx.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml new file mode 100644 index 0000000..72a2c44 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + {{- with .Values.nginx.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.nginx.service.annotations | nindent 4 }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.nginxSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml new file mode 100644 index 0000000..7bb3983 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: node-exporter + name: node-exporter + name: node-exporter + namespace: imxc +spec: + clusterIP: None + ports: + - name: scrape + port: 9100 + protocol: TCP + selector: + app: node-exporter + type: ClusterIP +--- +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: node-exporter + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: node-exporter +{{- end }} + template: + metadata: + labels: + app: node-exporter + name: node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/node-exporter + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + ports: + - containerPort: 9100 + hostPort: 9100 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - 
--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl new file mode 100644 index 0000000..c0a6204 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl @@ -0,0 +1,23 @@ + +{{/* +querier fullname +*/}} +{{- define "cortex.querierFullname" -}} +{{ include "cortex.fullname" . }}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "cortex.querierLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "cortex.querierSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: querier +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml new file mode 100644 index 0000000..a84ba8a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.querier.annotations | nindent 4 }} +spec: + {{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.querier.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.querierLabels" . | nindent 8 }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.querier.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.querier.priorityClassName }} + priorityClassName: {{ .Values.querier.priorityClassName }} + {{- end }} + {{- if .Values.querier.securityContext.enabled }} + securityContext: {{- omit .Values.querier.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.querier.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: querier + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=querier" + - "-config.file=/etc/cortex/cortex.yaml" + - "-querier.frontend-address={{ template "cortex.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.grpc_listen_port }}" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.querier.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.querier.extraVolumeMounts }} + {{- toYaml .Values.querier.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.querier.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.querier.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.querier.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.querier.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.querier.resources | nindent 12 }} + {{- if .Values.querier.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.querier.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.querier.env }} + {{- toYaml .Values.querier.env | nindent 12 }} + {{- end }} + {{- with .Values.querier.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.querier.extraContainers }} + {{- toYaml .Values.querier.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.querier.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.querier.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.querier.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.querier.extraVolumes }} + {{- toYaml .Values.querier.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml new file mode 100644 index 0000000..f078526 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.querier.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.querierFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.querierLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.querierFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml new file mode 100644 index 0000000..b69de62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.querier.replicas) 1) (.Values.querier.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.querier.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml new file mode 100644 index 0000000..c84d1a4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.querier.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . 
| nindent 4 }} + {{- if .Values.querier.serviceMonitor.additionalLabels }} +{{ toYaml .Values.querier.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.querier.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.querier.serviceMonitor.interval }} + interval: {{ .Values.querier.serviceMonitor.interval }} + {{- end }} + {{- if .Values.querier.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.querier.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.querier.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.querier.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.querier.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.querier.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml new file mode 100644 index 0000000..0701b7d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- with .Values.querier.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.querier.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.querierSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 0000000..c1f74c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,23 @@ + +{{/* +query-frontend fullname +*/}} +{{- define "cortex.queryFrontendFullname" -}} +{{ include "cortex.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "cortex.queryFrontendLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "cortex.queryFrontendSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 0000000..3e31d18 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,107 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . 
| nindent 4 }} + annotations: + {{- toYaml .Values.query_frontend.annotations | nindent 4 }} +spec: + replicas: {{ .Values.query_frontend.replicas }} + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.query_frontend.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 8 }} + {{- with .Values.query_frontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.query_frontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.query_frontend.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.query_frontend.priorityClassName }} + priorityClassName: {{ .Values.query_frontend.priorityClassName }} + {{- end }} + {{- if .Values.query_frontend.securityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.query_frontend.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: query-frontend + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=query-frontend" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.frontend-memcached" . 
| nindent 12 }} + {{- range $key, $value := .Values.query_frontend.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.query_frontend.extraVolumeMounts }} + {{- toYaml .Values.query_frontend.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.query_frontend.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.query_frontend.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.query_frontend.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.query_frontend.resources | nindent 12 }} + {{- if .Values.query_frontend.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.env }} + env: + {{- toYaml .Values.query_frontend.env | nindent 12 }} + {{- end }} + {{- with .Values.query_frontend.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.extraContainers }} + {{- toYaml .Values.query_frontend.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.query_frontend.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.query_frontend.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.query_frontend.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.query_frontend.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.query_frontend.extraVolumes }} + {{- toYaml .Values.query_frontend.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml new file mode 100644 index 0000000..2d76c6b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.query_frontend.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- if .Values.query_frontend.serviceMonitor.additionalLabels }} +{{ toYaml .Values.query_frontend.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.query_frontend.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.query_frontend.serviceMonitor.interval }} + interval: {{ .Values.query_frontend.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query_frontend.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.query_frontend.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml new file mode 100644 index 0000000..939457c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 0000000..85ff2e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml new file mode 100644 index 0000000..5256949 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.query_frontend.replicas) 1) (.Values.query_frontend.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.query_frontend.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 0000000..86270d0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,30 @@ + +{{/* +ruler fullname +*/}} +{{- define "cortex.rulerFullname" -}} +{{ include "cortex.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "cortex.rulerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "cortex.rulerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "cortex.rulerRulesDirName" -}} +rules-{{ . 
| replace "_" "-" | trimSuffix "-" }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml new file mode 100644 index 0000000..8448108 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ruler.enabled }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml new file mode 100644 index 0000000..a8e034d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml @@ -0,0 +1,191 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ruler.annotations | nindent 4 }} +spec: + replicas: {{ .Values.ruler.replicas }} + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ruler.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.rulerLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ruler.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ruler.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ruler.priorityClassName }} + priorityClassName: {{ .Values.ruler.priorityClassName }} + {{- end }} + {{- if .Values.ruler.securityContext.enabled }} + securityContext: {{- omit .Values.ruler.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ruler.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + {{- if .Values.ruler.sidecar.enabled }} + - name: {{ template "cortex.name" . }}-sc-rules + {{- if .Values.ruler.sidecar.image.sha }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}@sha256:{{ .Values.ruler.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.ruler.sidecar.imagePullPolicy }} + env: + {{- if .Values.ruler.sidecar.watchMethod }} + - name: METHOD + value: {{ .Values.ruler.sidecar.watchMethod }} + {{ end }} + - name: LABEL + value: "{{ .Values.ruler.sidecar.label }}" + {{- if .Values.ruler.sidecar.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.ruler.sidecar.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.ruler.sidecar.folder }}{{- with .Values.ruler.sidecar.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.ruler.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.ruler.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.ruler.sidecar.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.ruler.sidecar.searchNamespace }}" + {{- end }} + {{- if .Values.ruler.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.ruler.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.ruler.sidecar.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.ruler.sidecar.folderAnnotation }}" + {{- end }} + resources: + {{- toYaml .Values.ruler.sidecar.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.sidecar.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{- end }} + - name: rules + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ruler" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configs.enabled }} + - "-ruler.configs.url=http://{{ template "cortex.configsFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}" + {{- end }} + {{- if not .Values.config.ruler.alertmanager_url }} + {{- if .Values.config.ruler.enable_alertmanager_discovery }} + - "-ruler.alertmanager-url=http://_http-metrics._tcp.{{ template "cortex.name" . }}-alertmanager-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}/api/prom/alertmanager/" + {{- else }} + - "-ruler.alertmanager-url=http://{{ template "cortex.alertmanagerFullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}/api/prom/alertmanager/" + {{- end }} + {{- end }} + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ruler.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ruler.extraVolumeMounts }} + {{- toYaml .Values.ruler.extraVolumeMounts | nindent 12}} + {{- end }} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{ end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: /data + subPath: {{ .Values.ruler.persistentVolume.subPath }} + - name: tmp + mountPath: /rules + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + mountPath: /etc/cortex/rules/{{ $dir }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.ruler.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.ruler.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.ruler.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ruler.env }} + env: + {{- toYaml .Values.ruler.env | nindent 12 }} + {{- end }} + {{- if .Values.ruler.extraContainers }} + {{- toYaml .Values.ruler.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.ruler.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ruler.affinity | nindent 8 }} + tolerations: + {{- 
toYaml .Values.ruler.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: tmp + emptyDir: {} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + configMap: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + emptyDir: {} + {{- end }} + {{- if .Values.ruler.extraVolumes }} + {{- toYaml .Values.ruler.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml new file mode 100644 index 0000000..52fb3e0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ruler.replicas) 1) (.Values.ruler.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.ruler.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml new file mode 100644 index 0000000..de6744f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ruler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- if .Values.ruler.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ruler.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ruler.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ruler.serviceMonitor.interval }} + interval: {{ .Values.ruler.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ruler.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ruler.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ruler.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ruler.serviceMonitor.extraEndpointSpec }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml new file mode 100644 index 0000000..7752ef4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- with .Values.ruler.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ruler.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.rulerSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml new file mode 100644 index 0000000..2b30599 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml @@ -0,0 +1,18 @@ +{{- with .Values.runtimeconfigmap }} +{{- if .create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" $ }}-runtime-config + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.labels" $ | nindent 4 }} + {{- with .annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: + runtime_config.yaml: | + {{- tpl (toYaml .runtime_config) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml new file mode 100644 index 0000000..9194971 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.configsdb_postgresql.enabled .Values.configsdb_postgresql.auth.password -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }}-postgresql + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + postgresql-password: {{ .Values.configsdb_postgresql.auth.password | b64enc}} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml new file mode 100644 index 0000000..ff0e78f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if (and (not .Values.useExternalConfig) (not .Values.useConfigMap)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: {{ tpl (toYaml .Values.config) . 
| b64enc }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml new file mode 100644 index 0000000..963f866 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl new file mode 100644 index 0000000..3cca867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl @@ -0,0 +1,23 @@ + +{{/* +store-gateway fullname +*/}} +{{- define "cortex.storeGatewayFullname" -}} +{{ include "cortex.fullname" . }}-store-gateway +{{- end }} + +{{/* +store-gateway common labels +*/}} +{{- define "cortex.storeGatewayLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} + +{{/* +store-gateway selector labels +*/}} +{{- define "cortex.storeGatewaySelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: store-gateway +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml new file mode 100644 index 0000000..1019cc8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.store_gateway.replicas) 1) (.Values.store_gateway.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + {{- toYaml .Values.store_gateway.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml new file mode 100644 index 0000000..39eaeda --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.store_gateway.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + {{- if .Values.store_gateway.serviceMonitor.additionalLabels }} +{{ toYaml .Values.store_gateway.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.store_gateway.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.store_gateway.serviceMonitor.interval }} + interval: {{ .Values.store_gateway.serviceMonitor.interval }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.store_gateway.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.store_gateway.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 0000000..0238c75 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.storeGatewayFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.store_gateway.annotations | nindent 4 }} +spec: + replicas: {{ .Values.store_gateway.replicas }} + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.store_gateway.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-store-gateway-headless + {{- if .Values.store_gateway.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.store_gateway.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.store_gateway.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.store_gateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.store_gateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.store_gateway.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{- toYaml .Values.store_gateway.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.store_gateway.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.store_gateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.store_gateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.store_gateway.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.store_gateway.priorityClassName }} + priorityClassName: {{ .Values.store_gateway.priorityClassName }} + {{- end }} + {{- if .Values.store_gateway.securityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.store_gateway.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.store_gateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.store_gateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.store_gateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.store_gateway.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.store_gateway.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.store_gateway.extraVolumes }} + {{- toYaml .Values.store_gateway.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.store_gateway.extraContainers }} + {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} + {{- end }} + - name: store-gateway + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=store-gateway" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.store_gateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.store_gateway.extraVolumeMounts }} + {{- toYaml .Values.store_gateway.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.store_gateway.persistentVolume.subPath }} + subPath: {{ .Values.store_gateway.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.store_gateway.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.store_gateway.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.store_gateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.store_gateway.resources | nindent 12 }} + {{- if .Values.store_gateway.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.store_gateway.env }} + env: + {{- toYaml .Values.store_gateway.env | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 0000000..c56ec77 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,24 @@ +{{- if eq .Values.config.storage.engine "blocks" 
-}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 0000000..f58019b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,23 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml new file mode 100644 index 0000000..fc41461 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.fullname" . }}-memberlist + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + name: gossip + targetPort: gossip + selector: + {{- include "cortex.selectorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl new file mode 100644 index 0000000..4798c6d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl @@ -0,0 +1,23 @@ + +{{/* +table-manager fullname +*/}} +{{- define "cortex.tableManagerFullname" -}} +{{ include "cortex.fullname" . }}-table-manager +{{- end }} + +{{/* +table-manager common labels +*/}} +{{- define "cortex.tableManagerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: table-manager +{{- end }} + +{{/* +table-manager selector labels +*/}} +{{- define "cortex.tableManagerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: table-manager +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml new file mode 100644 index 0000000..d24dcc3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml @@ -0,0 +1,106 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.table_manager.annotations | nindent 4 }} +spec: + replicas: {{ .Values.table_manager.replicas }} + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.table_manager.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.tableManagerLabels" . | nindent 8 }} + {{- with .Values.table_manager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.table_manager.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.table_manager.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.table_manager.priorityClassName }} + priorityClassName: {{ .Values.table_manager.priorityClassName }} + {{- end }} + {{- if .Values.table_manager.securityContext.enabled }} + securityContext: {{- omit .Values.table_manager.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.table_manager.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: table-manager + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=table-manager" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.table_manager.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.table_manager.extraVolumeMounts }} + {{- toYaml .Values.table_manager.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.table_manager.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.table_manager.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.table_manager.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.table_manager.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.table_manager.resources | nindent 12 }} + {{- if .Values.table_manager.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.table_manager.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.env }} + env: + {{- toYaml .Values.table_manager.env | nindent 12 }} + {{- end }} + {{- 
if .Values.table_manager.extraContainers }} + {{- toYaml .Values.table_manager.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.table_manager.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.table_manager.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.table_manager.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.table_manager.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.table_manager.extraVolumes }} + {{- toYaml .Values.table_manager.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml new file mode 100644 index 0000000..91adabf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.table_manager.replicas) 1) (.Values.table_manager.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.table_manager.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml new file mode 100644 index 0000000..9748724 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.table_manager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- if .Values.table_manager.serviceMonitor.additionalLabels }} +{{ toYaml .Values.table_manager.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.table_manager.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.table_manager.serviceMonitor.interval }} + interval: {{ .Values.table_manager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.table_manager.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.table_manager.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.table_manager.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.table_manager.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml new file mode 100644 index 0000000..ff3c57d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml @@ -0,0 +1,23 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- with .Values.table_manager.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.table_manager.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml new file mode 100644 index 0000000..4a0f8c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml @@ -0,0 +1,1605 @@ +image: + #repository: quay.io/cortexproject/cortex + repository: 10.10.31.243:5000/cmoa3/cortex + # -- Allows you to override the cortex version in this chart. Use at your own risk. + #tag: "" + tag: v1.11.0 + pullPolicy: IfNotPresent + + # -- Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: [] + pullSecrets: + - regcred + + +# -- Kubernetes cluster DNS domain +clusterDomain: cluster.local + +tags: + # -- Set to true to enable block storage memcached caching + blocks-storage-memcached: false + +ingress: + enabled: false + ingressClass: + enabled: false + name: "nginx" + annotations: {} + hosts: + - host: chart-example.local + paths: + - / + tls: [] + +serviceAccount: + create: true + name: + annotations: {} + automountServiceAccountToken: true + +useConfigMap: false +useExternalConfig: false +externalConfigSecretName: 'secret-with-config.yaml' +externalConfigVersion: '0' + +config: + auth_enabled: false + api: + prometheus_http_prefix: '/prometheus' + # -- Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs + # which can benefit from compression. + response_compression_enabled: true + ingester: + walconfig: + wal_enabled: true + flush_on_shutdown_with_wal_enabled: true + recover_from_wal: true + lifecycler: + # -- We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. 
+ # It can take a while to have the full picture when using gossip + join_after: 10s + + # -- To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, + # after putting their own tokens into it. This is only useful when using gossip, since multiple + # ingesters joining at the same time can have conflicting tokens if they don't see each other yet. + observe_period: 10s + # -- Duration to sleep for before exiting, to ensure metrics are scraped. + final_sleep: 30s + num_tokens: 512 + ring: + # -- Ingester replication factor per default is 3 + replication_factor: 3 + kvstore: + store: "memberlist" + limits: + # -- Enforce that every sample has a metric name + enforce_metric_name: true + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_lookback: 0s + server: + http_listen_port: 8080 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 10485760 + grpc_server_max_send_msg_size: 10485760 + grpc_server_max_concurrent_streams: 10000 + ingester_client: + grpc_client_config: + max_recv_msg_size: 10485760 + max_send_msg_size: 10485760 + # -- See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config + storage: + engine: blocks + index_queries_cache_config: + memcached: + # -- How long keys stay in the memcache + expiration: 1h + memcached_client: + # -- Maximum time to wait before giving up on memcached requests. + timeout: 1s + blocks_storage: + # custume backend setting related to using s3 + backend: s3 + s3: + bucket_name: cortex-bucket + # -- The S3 bucket endpoint. It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format. 
+ endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + + tsdb: + dir: /data/tsdb + bucket_store: + sync_dir: /data/tsdb-sync + bucket_index: + enabled: true + # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config + store_gateway: + sharding_enabled: false + distributor: + # -- Distribute samples based on all labels, as opposed to solely by user and + # metric name. + shard_by_all_labels: true + pool: + health_check_ingesters: true + memberlist: + bind_port: 7946 + # -- the service name of the memberlist + # if using memberlist discovery + join_members: + - '{{ include "cortex.fullname" $ }}-memberlist' + querier: + active_query_tracker_dir: /data/active-query-tracker + # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all + # queries are sent to ingester. Ingesters by default have no data older than 12 hours, + # so we can safely set this 13 hours + query_ingesters_within: 9h + # -- The time after which a metric should be queried from storage and not just + # ingesters. + query_store_after: 7h + # -- Comma separated list of store-gateway addresses in DNS Service Discovery + # format. This option should is set automatically when using the blocks storage and the + # store-gateway sharding is disabled (when enabled, the store-gateway instances + # form a ring and addresses are picked from the ring). + # @default -- automatic + store_gateway_addresses: |- + {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}} + dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095 + {{- end }} + query_range: + split_queries_by_interval: 24h + align_queries_with_step: true + cache_results: true + results_cache: + cache: + memcached: + expiration: 1h + memcached_client: + timeout: 1s + ruler: + enable_alertmanager_discovery: false + # -- Enable the experimental ruler config api. 
+ alertmanager_url: 'http://alertmanager.imxc/alertmanager' + enable_api: true + # -- Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config + storage: {} + runtime_config: + file: /etc/cortex-runtime-config/runtime_config.yaml + alertmanager: + # -- Enable the experimental alertmanager config api. + enable_api: true + external_url: 'http://alertmanager.imxc/alertmanager' + #external_url: '/api/prom/alertmanager' + # -- Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config + storage: {} + frontend: + log_queries_longer_than: 10s + # S3 사용 관련 커스텀 설정 + alertmanager_storage: + s3: + bucket_name: cortex-alertmanager + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + ruler_storage: + s3: + bucket_name: cortex-ruler + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + +runtimeconfigmap: + # -- If true, a configmap for the `runtime_config` will be created. + # If false, the configmap _must_ exist already on the cluster or pods will fail to create. + create: true + annotations: {} + # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file + # 설정부 + runtime_config: {} +alertmanager: + enabled: true + replicas: 1 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful for using a persistent volume for storing silences between restarts. 
+ enabled: false + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log level (debug, info, warn, error) + extraArgs: {} + # -experimental.alertmanager.enable-api: "true" + # -alertmanager.web.external-url: /alertmanager + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + + persistentVolume: + # -- If true and alertmanager.statefulSet.enabled is true, + # Alertmanager will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Alertmanager data Persistent Volume Claim annotations + annotations: {} + + # -- Alertmanager data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Alertmanager data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Alertmanager data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Alertmanager data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # -- If not set then a PodDisruptionBudget will not be created + podDisruptionBudget: + maxUnavailable: 1 + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 60 + + # -- Init containers to be added to the cortex pod. + initContainers: [] + + # -- Additional containers to be added to the cortex pod. + extraContainers: [] + + # -- Additional volumes to the cortex pod. + extraVolumes: [] + + # -- Extra volume mounts that will be added to the cortex container + extraVolumeMounts: [] + + # -- Additional ports to the cortex services. Useful to expose extra container ports. 
+ extraPorts: [] + + # -- Extra env variables to pass to the cortex container + env: [] + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # -- skipTlsVerify Set to true to skip tls verification for kube api calls + skipTlsVerify: false + enableUniqueFilenames: false + enabled: false + label: cortex_alertmanager + watchMethod: null + labelValue: null + folder: /data + defaultFolderName: null + searchNamespace: null + folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +distributor: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: + -validation.max-label-names-per-series: "45" + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - distributor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the distributor pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 60 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +ingester: + replicas: 3 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful when using WAL + enabled: true + # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details + podManagementPolicy: OrderedReady + + service: + annotations: {} + labels: {} + + serviceAccount: + name: + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - ingester + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 30 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details + policies: + - type: Pods + value: 1 + # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval + periodSeconds: 1800 + # -- uses metrics from the past 1h to make scaleDown decisions + stabilizationWindowSeconds: 3600 + scaleUp: + # -- This default scaleup policy allows adding 1 pod every 30 minutes. + # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + + lifecycle: + # -- The /shutdown preStop hook is recommended as part of the ingester + # scaledown process, but can be removed to optimize rolling restarts in + # instances that will never be scaled down or when using chunks storage + # with WAL disabled. 
+ # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down + preStop: + httpGet: + path: "/ingester/shutdown" + port: http-metrics + + persistentVolume: + # -- If true and ingester.statefulSet.enabled is true, + # Ingester will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: true + + # -- Ingester data Persistent Volume Claim annotations + annotations: {} + + # -- Ingester data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Ingester data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Ingester data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Ingester data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: exem-local-storage + + # -- Startup/liveness probes for ingesters are not recommended. + # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + startupProbe: {} + + # -- Startup/liveness probes for ingesters are not recommended. 
+ # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + livenessProbe: {} + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +ruler: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + #-ruler.configs.url: http://cortex-configs:8080 + #-ruler.alertmanager-url: http://cortex-alertmanager:8080 + -ruler.storage.type: configdb + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + # -- allow configuring rules via configmap. 
ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html + directories: {} + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + enabled: false + # -- label that the configmaps with rules are marked with + label: cortex_rules + watchMethod: null + # -- value of label that the configmaps with rules are set to + labelValue: null + # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) + folder: /tmp/rules + # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead + defaultFolderName: null + # -- If specified, the sidecar will search for rules config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
+ folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +querier: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - querier + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the querier pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +query_frontend: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - query-frontend + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +table_manager: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +configs: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + # -configs.database.migrations-dir: /migrations + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +nginx: + enabled: true + replicas: 2 + http_listen_port: 80 + config: + dnsResolver: coredns.kube-system.svc.cluster.local + # -- ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size: 20M + # -- arbitrary snippet to inject in the http { } section of the nginx config + httpSnippet: "" + # -- arbitrary snippet to inject in the top section of the nginx config + mainSnippet: "" + # -- arbitrary snippet to inject in the server { } section of the nginx config + serverSnippet: "" + setHeaders: {} + # -- (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config + auth_orgs: [] + # -- (optional) Name of basic auth secret. + # In order to use this option, a secret with htpasswd formatted contents at + # the key ".htpasswd" must exist. 
For example: + # + # apiVersion: v1 + # kind: Secret + # metadata: + # name: my-secret + # namespace: + # stringData: + # .htpasswd: | + # user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + # user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + # + # Please note that the use of basic auth will not identify organizations + # the way X-Scope-OrgID does. Thus, the use of basic auth alone will not + # prevent one tenant from viewing the metrics of another. To ensure tenants + # are scoped appropriately, explicitly set the `X-Scope-OrgID` header + # in the nginx config. Example + # setHeaders: + # X-Scope-OrgID: $remote_user + basicAuthSecretName: "" + + image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: 1.21 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: {} + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /healthz + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /healthz + port: http-metrics + readinessProbe: + httpGet: + path: /healthz + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: false + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 10 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the nginx pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + +store_gateway: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - store-gateway + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true Store-gateway will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Store-gateway data Persistent Volume Claim annotations + annotations: {} + + # -- Store-gateway data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Store-gateway data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Store-gateway data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Store-gateway data 
Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +compactor: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - compactor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true compactor will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- compactor data Persistent Volume Claim annotations + annotations: {} + + # -- compactor data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # compactor data Persistent Volume size + size: 2Gi + + # -- Subdirectory of compactor data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- compactor data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +# -- chunk caching for legacy chunk storage engine +memcached: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index read caching for legacy chunk storage engine +memcached-index-read: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index write caching for legacy chunk storage engine +memcached-index-write: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-frontend: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-index: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-metadata: + # enabled/disabled via the tags.blocks-storage-memcached boolean + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +configsdb_postgresql: + enabled: true + uri: postgres://admin@postgres/configs?sslmode=disable + auth: + password: eorbahrhkswp + existing_secret: + name: + key: diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..be38643 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.6.0 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.6.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml new file mode 100644 index 0000000..2631417 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch-headless + labels: + app: elasticsearch +spec: + clusterIP: None + selector: + app: elasticsearch + ports: + - name: transport + port: 9300 diff --git 
a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml new file mode 100644 index 0000000..505cc5a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch + labels: + app: elasticsearch +spec: + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 +# nodePort: 30200 +# type: NodePort + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml new file mode 100644 index 0000000..ee0a42d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: imxc + name: elasticsearch-config + labels: + app: elasticsearch +data: +# discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch", "elasticsearch-2.elasticsearch"] +# cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1", "elasticsearch-2"] +# ES_JAVA_OPTS: -Xms8g -Xmx8g + elasticsearch.yml: | + cluster.name: imxc-elasticsearch-cluster + network.host: ${POD_NAME} + discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch"] + cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1"] + xpack.ml.enabled: false + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.client_authentication: required + xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 + 
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 + xpack.security.transport.filter.enabled: true + xpack.security.transport.filter.allow: _all + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.keystore.path: http.p12 + node.ml: false + cluster.routing.rebalance.enable: "all" + cluster.routing.allocation.allow_rebalance: "indices_all_active" + cluster.routing.allocation.cluster_concurrent_rebalance: 2 + cluster.routing.allocation.balance.shard: 0.3 + cluster.routing.allocation.balance.index: 0.7 + cluster.routing.allocation.balance.threshold: 1 + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: "85%" + cluster.routing.allocation.disk.watermark.high: "90%" + cluster.routing.allocation.disk.watermark.flood_stage: "95%" + thread_pool.write.queue_size: 1000 + thread_pool.write.size: 2 + ES_JAVA_OPTS: -Xms8g -Xmx8g diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml new file mode 100644 index 0000000..5a53f57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-0 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-1 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + 
accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-elasticsearch-cluster-2 +# labels: +# type: local +# app: elasticsearch +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.ELASTICSEARCH_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: elasticsearch-storage +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: +# - {{ .Values.global.ELASTICSEARCH_HOST3 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml new file mode 100644 index 0000000..a4ae2db --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml @@ -0,0 +1,53 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +#kind: PersistentVolumeClaim +#apiVersion: v1 
+#metadata: +# namespace: imxc +# name: elasticsearch-data-elasticsearch-2 +#spec: +# accessModes: +# - ReadWriteOnce +# volumeMode: Filesystem +# resources: +# requests: +# storage: 30Gi +# storageClassName: elasticsearch-storage +# selector: +# matchLabels: +# type: local +# app: elasticsearch \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml new file mode 100644 index 0000000..2cbd4b8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml @@ -0,0 +1,146 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta1 +{{- end }} +kind: StatefulSet +metadata: + namespace: imxc + name: elasticsearch +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: elasticsearch +{{- end }} + serviceName: elasticsearch + replicas: 2 #3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: elasticsearch + spec: + securityContext: + fsGroup: 1000 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - elasticsearch + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: elastic-node + operator: In + values: + - "true" + initContainers: + - name: init-sysctl + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + #command: ["sysctl", "-w", "vm.max_map_count=262144"] + command: ["/bin/sh", "-c"] + args: ["sysctl -w vm.max_map_count=262144; chown -R 1000:1000 
/usr/share/elasticsearch/data"] + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + containers: + - name: elasticsearch + resources: + requests: + cpu: 1000m + memory: 16000Mi #32000Mi + limits: + cpu: 2000m + memory: 16000Mi #32000Mi + securityContext: + privileged: true + runAsUser: 1000 + capabilities: + add: + - IPC_LOCK + - SYS_RESOURCE + image: {{ .Values.global.IMXC_IN_REGISTRY }}/elasticsearch:{{ .Values.global.ELASTICSEARCH_VERSION }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ES_JAVA_OPTS + valueFrom: + configMapKeyRef: + name: elasticsearch-config + key: ES_JAVA_OPTS + # log4j patch + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: ELASTIC_USERNAME + value: {{ .Values.global.CMOA_ES_ID }} + - name: ELASTIC_PASSWORD + value: {{ .Values.global.CMOA_ES_PW }} + readinessProbe: + httpGet: + scheme: HTTPS + path: /_cluster/health?local=true + port: 9200 + httpHeaders: + - name: Authorization + # encode base64 by elastic:elastic + value: Basic ZWxhc3RpYzplbGFzdGlj + initialDelaySeconds: 5 + ports: + - containerPort: 9200 + name: es-http + - containerPort: 9300 + name: es-transport + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + - name: elasticsearch-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-cert-certificate + mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12 + subPath: elastic-certificates.p12 + - name: es-cert-ca + mountPath: /usr/share/elasticsearch/config/elastic-stack-ca.p12 + subPath: elastic-stack-ca.p12 + - name: es-cert-http + mountPath: /usr/share/elasticsearch/config/http.p12 + subPath: http.p12 + volumes: + - name: elasticsearch-config + configMap: + name: elasticsearch-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml + - name: es-cert-certificate + secret: + secretName: es-cert + - name: es-cert-ca + 
secret: + secretName: es-cert + - name: es-cert-http + secret: + secretName: es-cert + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: elasticsearch-storage + resources: + requests: + storage: 10Gi diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml new file mode 100644 index 0000000..2a24b92 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRrCrEWs79GCUPrYkFrkDBEF9uz4gIDAMNQBIIEyJUjcP339Anee6bdJls469HbsqYGgzidG41xto7ignNdZdP9LTTca+w8sN8tbVnTUZi4kQYcPSQqv+cWobi66KpgvQ7HhA/YE9K5L7wR7KEj0o61LYvucHm19hRdt788EvBy4mi8cDAr3m49NNuLUM6wyeCEKr2W2dwZFIyxFTPVv6/ef6cuHyDNLXJtjUmOIzNDL8Olqk8JGAd9bwXlizcShfmbiHHX8pAhK0u9JThFQePvCGiKA4LwzeuuwuEniznMlUQ4T/TjLjLLYcoS4vktfOJKPOgL3esjsc5hPoVgbw+ZpNCxRq1RVs/5eOBkxzXhJ7hdNELJDcMjitBfl71MlSDtMV4FhlVuhjilsuHx6URucsEE2l1V3asg4QP1PoSiACqncr2WhCcrKu0d8DztlIkCYG7D8oiAx4nEzsm0xmOhIcigHw6GP4MNeCieJCgAwLkJf1m73IYcxyaKsJAc57jfs9ue62KkVHL2NxNRjTps2j0Cl5NJQRE4CTkieU0etsNS1nJEwiJunVTyHXAa53MF6j40awEqs2Ko4gQENPpuQc599yJb+ZTHfHPe8bpfrmnxiEAaeiABu+OVH9bdLK5gtCyD5vXGZKVtHbyR+0+UlBggw/horFQIP+x7SKO53+ho0iCnYyQK52kJiv93JNgStGHpxf1SkPTtWHOraR2qSZTX6F7vjBtIq3Y6ocb6yo/jMNhzk3spHdz+F99S6uV3NLmDfX2vJmu1YSaPwaNZGDggcFI/g2S5ylBWyHpk2rB5gtklUIQEWxFFvbFOp37ffcdC0mZ6SgpOxj+IxuVLqTvyDLjrfteEvfjRAFXsT8E4XikC8QKjQ+KAwDYETidOiYB0/ByCh7t1KbcKJWU8XYxqzukX88CyVtO9Lp/f97x3ycvaF1UfzLBrm/bnTa0jPEP2/OdzpbjQJcEGX64+QY92k38zjPe4tedUz5H/C9aw8Q8r/DSxUhn2sdDXssR9jytITLLOJHDJX7XCfZxtoW60bwRm5MyXc4bJmjZT2BgxTWIVokaOhk0IZwpbC/oxh1QkaHBioP6+slASXg8Xu9l+mACevb1b9RvpN+fhurW2wOHl4Kul
775BCohuTtiqKAce8KEACwncwYz+ZfcPTkbLRy6+p6NI3zNWpZE+iFlPtLh+2+T/QQHEfKTNUxcXLt8WCMOZuCe776T41nY8UhbUQJKqlEvom3MzCcsvFBoahlpjv+rg9/Ay7ESMil49e2x3qbD2929X0BHz//RcvPO5fvSEK/tC2uHzWzqHf0ZaRwtO19Z95Uv3GjGNF0SO8qri830LfJ+ctjk320qLyZmxA9QgPoI2oMHSxkaX1fgVeiN9coBM8yJbPK8ZdOOg4abnYOhqrTJXaoSFo+SYyAVZoTiQIIk/JScL5Qcw9IJw6sSKmOdChy2spYQKeo1NU9ecLD8YRBqRP0EET7e7NDPKlIWQ1vB5y2hokyL7bxvbGgzqQBAyo9wKJ3v1g4IYEWA9mluvQapOMVEHBYh6wv2nTJpE9EqMxpYQBU1w+vgX0EUgZDEOBkbvd5wubAeERt0mJqjea6vxWJIbeqMVIIoJSZEDaPE5qVNYaosoc8yvAZ9+U3lZlZObHzHEAIUx/2pP/jFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE4MTk0NzgwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFP43u2ii0k7JTUfInMhUBwjWZrS/AgMAw1CAggdItHB4SBc5KdDVc8eXuF8Ex1WP/Y2wz76PoNNpYm2LeIVozsp5c/2RDN2KqhcvhTihlY44esqWWVCOx+OTwmAPFwzZSrMaOYpcOP3fRWaHJLw98cK8a1ZuNv3eXWecf333TrsvU/bpT3v0KNO915qnSbtNwlvXaOMm6jbw6eBnkB7i6jxA7kgVAW6soa3ZHOrV78quBSbAjXZddHsI8x3MS4rxdvkp6GHet22/fQxjxz8UlQEDqzQgK7F4RqULRJeU//JID7VJqfbHRHfnYsKszsirhWKeJsxLVhG1VU/zRgxs0C35NfQeR/o7jmFpE7CCvvC0Rea2pybNojb51HLvyycXtpGn0gAdTBVNnwK1X58uSDWH7jM61uX9f+/gcDZqlUj6UVc6mzqxAgzDtf6B32G0VQq2szaJjbRVEVXhCAOIdVj6pRpI3l3gRv8OkNAWsGwYDMjeFxnrEpw1AQkEj7FRgI6iNOxEfUhOVYIEsflGTUdcd+K+zlCCHAJoMzbqiwPyHHgvLOp04A7fog+H3/cn6Tdmrp/J7TxpaW1ZwwcHtTRLoq0F77Sj8XJule3CzaDtg6IBen/Yo7H9hhK3ORodlGjJYA285dHAd1mtqmHmoWeDNoVrlVyymge78yXGmlFsBWF83VUChRx+9noF3Zhz+QMPBNsKHk4TM9yRHiWpMZIdkEZKq+obCPU2PmC21wnWx13nhb88gaNyBjHxFsGE91SgEyQh/cPhi01Y7+yNYQvYOXJe3EQ6oqFCBkPUnrbAMiHDP//AVN/tUrgVbmpIclfFprP2YIRcfGa7qch48RFbmhnX5N/OYLaPnNYdbxOiwZ0f/KIpDKWS67kS2N+jDKWs/SCLs2g89q1z2EGvbVwKMD6Vl559EZxAfNRv+eZu0MvTejEkuykIHJpXCyP+8EphUyWW9Cqll1ux4rXMUDkgl5sh1WgSoIEASX2j5TJ3fIh0nBkjAkBi0n2BINZgVWKj9U1zHNdRF67Eb+97lUuY6JIkbFhLSgZiIZqnI9bnW8OKUJFtvVtlSKG4xqdOeAroB8GLw2iR/GjF2Dvy4rIZo+qeTCIN+bm+iFkCri7L2K0/KR25h7bAtXwBxwMct5F4A1vltlLs408efMRJ7dg3iqMGhRyXdwxKexWJLbp02uJQVU9/ogYeLfSiIZEm25qjEMQZqRpQpwLaH5JB9oLKqdLEdeuxOfqb6weHDOtITlFHToeRNzIEmbiT9gbdpMwKTxs/rtwMHgGU6kIJ
mIFgnw2gauKvpiIuDCY79JpSNipsicvvLTIa4cc8sZCCllZ1wAmbNDsCH6p0bh8CooMjGf2vUbRClSe9+R19/lRMFGSp4N6fElW7MxNw85xpkFjG0s053fvIJmfPhxVqUHMP3fFQv0DUvvQNvNTsRGdDjohkC0095v9EWy7n9Frv2wIM2G7uVHvrlgkQfPK2JsYZKsUE0KXa4HUQptWL71kp7RQSmOmXFzsthjYVXu/pfXA+u+PAtHvQpo1nTPreXn3UZqiEiQmNkmMPLAYzpIi35tjNewfw5XwDj77pqH5OFcMZDTKbiInV1LuvFlKxCEYh4gvTThC0XTsrsiHgldtNcw9ZB017uPW9AAqbj2IB0d5b0ZB3yMZ67uzt1pretcxmEfSoA64QWOC9lBYp4DVE9QxcCnsSgibWreqpdJHmX5MR4umwIb6WaM1pJdCY1bW4tO3ZVT4DA/4ry7jqxUH4AcZRNK0zYR6DAtZndB7LTJhT+8d5EBtmAHzC5HT9KLmHV6mAG1QLMlwhNXmtM0YCJsKxcZo+xLBy/2cHl41EU4ACiuEq1JrM5j9fQk+hmJHT+JB0aqv+kvdxGmgBuVWGHQBtNTV6TYeLzqzDpIl9uXi3qFKFBuTQOska2zAMv7gLOe79w1cVb/SJKdcYjWtLR0v6wfaRgVeBwLvTvh7nNXhXRqKfQKe3e2Tjgq4nV4kOQHI21WDKGSd4ONyyvXGMwNzRgcZwpDFAcvshZATwaBtAo4JWi6D3vJB6H1PHRtyqHjErKkPazoZMjR2sZI8S4BMo4R5fa1ZztZO4p2lJYUIAQHj872UdGXHTXgyZKU8t/ifiVfxon5UtZJRi0Xq5OMdN//Qtq2kVwQxntf0eWsygkKMtNr1XLzu0TAMUMItnohdQWUw5w8UeXYOAYfZFqZEhKfcwkJsfq1q56ptzVBI3T2hDFM7xuVFNn5y+FCTx9pB9FCbln/3ZlKuUiTH/eLMKdQYGkRX4X0qzkx3YqAn6jDLQPEG3Rz0JP53T43uLxGpqa8+jn1XIUCNj50mqZGiah7bdo1qsDHbFWYCe7uoOjPapontpaoEQaZog1INqBNerS19a+i4S0/uAsGApykwUhk/zGfr9UudpKJWd7AznlF3+yfZfk/9mCSajBpoWafCIWmOvxJD77L86YAs9STuhWUGQvL2rxPf2uyS4WAi2+DgbdrGTSiwNB/1YX8iHp/cw6DA+MCEwCQYFKw4DAhoFAAQUSvLiFrAQlmfgL3Cewez5Fw2+0okEFH+RyXvcJHVaYbaqjejrXkgUS0JsAgMBhqA= + elastic-stack-ca.p12: 
MIIJ2wIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCBWEGCSqGSIb3DQEHAaCCBVIEggVOMIIFSjCCBUYGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBTQSr5nf5M77CSAHwj38PF//hiFVgIDAMNQBIIEyBrOipz1FxDRF9VG/4bMmue7Dt+Qm37ySQ/ZfV3hFTg6xwjEcHje6hvhzQtFeWppCvd4+7U/MG8G5xL0vfV5GzX1RhVlpgYRfClqMZo3URqBNu6Y5t3sum+X37zbXQ1GI6wo3YURStZkDHlVtObZB667qqj5rO4fIajzRalaxTFda8aS2xAmQklMcCEXASsO5j0+ufVKiOiG2SIEV2LjjYlUymP7d9+LAZ2I6vR+k/jo2oNoPeq0v68qFd9aOB2ojI9Q/PDFA7Nj1kKMK7KjpxGN5/Ocfr8qrxF1mviA6rPdl8GV3WCFMFKcJER4fRmskWGNE/AdwU3laXvJux/qz4rjiYoJX+5rSyXBDxdznaFiSyN1LYkFJ+nao6HSAmPPyfEPVPRICc6XHMUM4BZOVlJO49M1xg7NFQUtkyVm8+ooDwXCiGEUHDZNw+hCcuUewp0ZXki695D0tESnzi3BE56w7CRySeaNR8psAtL74IUtov9I66GlBEI7HSbyLTT9Fa7+o+ElJWnFqIyW8WzNF3T5fvRv2LfKjYO5KiISlOM03KlETWE1F60TZqW3EbP9WjLhRnovFcJVsNyha+wDVTu44DAylMX4Oh2xKYm2YW+Oi0aeCFmJbDp/TlxYhm5ACYUxma6CVxbEgHkxwjWyFfiNQp2MBL/5HFJGxuny2lVnN8yUSCvDdnOlVTB36/EByY/oA8S+GF/QRYd3PMew56s7aBgPt8mhncN5Cdm+GCD/Nb/ibcuTId9HAaT6o3wMsc7bYusjHGCjFbz9fEdU2MdpLJO+FXVM9E1sEKoTpPLeJDh2a9RUWJQPUCLu8MgEdiJohtEpOtvM7y5+XbuAkYaDsBw3ym5M/kwovN09X1m5x5qM0QSRIVKHf1qo6wo68VMeVQDEBNxJ5/tuZ11qE3siGRfwDnUkCpb9H54+w3zaScPHGAdwplYYwaqnFMwi8nFMtjZvGOLT2wqPLPnKVeQGt4TCVWPXuB4kYnmbTWoJbUT5Wpurcnyn8l6uzLmypCD4k8YiQoDb1b9HIFUAypn580KIUF19eCSGeIHl4hbmusuISxQ1qXk7Ijbj7PiVtMKy5h8rG/c57KJvfvnMQy9hauM5kcZmlTUvrHDw+7cUFB96/wXbvqmcPKGKutgXRqHcTYyBOPEJnSUMBIM2r59wgFjlMuQLrJurzwzox/IEKu/KMilIBDp4k+MHz6NrINWfbV7xa6yAja1kWyvUmwYjCHhlXZmhCb2fmhP1lsnN4BNAkDsdfxHBRCBISy6fuHSY+c4RsokxZ4RomHhVvJsEY/AE4DCvVXDunY8t4ARrQCqXYso3+kVjm6+aelKk+KgyLZ3St0eAIl/Y2xqEXgh0wHGrx3CLZqGqq864f5MmrxiytmlSzHP4RSad20drsN3VchaJZkyrGbKEs6ZJDU2dq5NiC5unqx5tLw6XNRTydIC2PaiVl9m3GLUCh6hQSRJnvcXrqOd8a9K1uV5OoA3TRdc2V5lyxWRIJsdK5KfiAiTsNeM+Tt+Dh2pZjt2l2h4n4BjgYApxG8u10BP1iZ1e1OsCRgLGbgiuXtXrlrjwvJzrB5i11oy9mt3vqgtbjAciQpsQYGGfnVqyGXfEc55hIYWClNAFZDE4MBMGCSqGSIb3DQEJFDEGHgQAYwBhMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE3OTU1MTUwggQUBgkqhkiG9w0BBwagggQFMIIEAQIBADCCA/oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFEVjuzIvhFF9BzWGr3Ee4cw/mLcqAgMAw1CA
ggPAwroH+zLRt2Jtb8IWeOaIbXAv4sVGUljreWkJE8dkoXNcEQpATEt5H7L4uwnDsevLi1yfWtUDN1OxM8gb7iR4Jysrd+8uM1r0nn9YStz/I3qhN9Fb6yAb+ENTCzwo/oAnyDBM/lXR9fL0EPHRfsDmK+6kC+hZ4AZIao+1oWRD0Bu970yK6gwv7TIRCsS/RBZfC/d4Slz1+IQChiWS4ttTzxK/IuhaFbia0JYtUpjmMGMBQwYRyvITgYpOIct39Il/mabQ4BA1/wk7Oecfe3RHzIfM49AxJtwKppfVfaRJjtK1aoO/GKS6CZuvIIX8q3Mt32OEaoRN9FJM9EkUkKCcYhtRfq0/8MTO97MbrcKeO8XICn8vZwOMM7k7IFtCq44/3QBXa9fpc2BFMVYOoQ22W2ZuMNMRp6OYc6Da1BG4Ik9mt1T4k9NkvfrhpNceR27v6Q0pZNUTN26aPr11/SfS/IZmLGXF7cGAfxITMOQwK2ig6qivXzvwLxfnyW4aHF7K/jL59kDg9Vf9zKmlvPJpHSEWv53U9SFYvvrMISd6E8np0bHRM5p49mgH/KXGauRRaLWUxlBwrhjeZRimTF9x//a0luGf5tIW8ymi32wn8LNiu7fbnkldnivfgWVmktNrPMH+70HNlCWkfaNibSHpzyDQRTzg9PjHEcFH+pQAXCc+A8y8FSvlT+nx9dpXXRK5pqbrGnWyrm5D3oY1ceO0E85R9Fx4Ss0f+mMBtNDYpz7zS5BSX36MNn0gm6MkhlOVbbcAob4WbZAEM7zaiV1ilLegXPZYPCGQydN02Q+lJ7HHZ18T4mzTrjF6M1PFIx31cR1r0ZtJhkCrOWdlTrmovvYYEgEStsiE3pi6dW4v1NgcJVevpnJJ//vpGXasH9Ue/ZNdk1tj/h7cQ/qbKlmvrcuH/UQ969RsNX+K3B1xeYnfbV88BXqFLuqhuWy38wwvBvKO37vq+ioPNIjwaIyCVzoF9/MAx2aNOdk/x04mSNVYh5q0ZKv+3JC3W2vJxV2aonc/ybFgi2GZz2erVYNZTSXz+bEefx8QWzcW6/zr437jh/peQRyQ92PsN+eZV9GB2lrwmF7K2579vNQoVcpzTvTFf+eZZhF8u/1HZW4uFHRUyqE3rHyOukSFukD7XWnFL1yUcWw/SGNIm1HNZD3nXjqcwdAIXl7OvqdO0z/Qt2bny6KpOSJqjMUjB5AX5/yt2xlZBDhlsoGtRfbSWefGf7qTdpg2T9+ClMb7vS1dLzrGRzNgGc7KO2IQdkNcfj+1MD4wITAJBgUrDgMCGgUABBSoZ3hv7XnZag72Gq3IDQUfHtup5gQUHZH4AQTUUCeOS0WnPOdFYNvm1KUCAwGGoA== + http.p12: 
MIINZwIBAzCCDSAGCSqGSIb3DQEHAaCCDREEgg0NMIINCTCCBWUGCSqGSIb3DQEHAaCCBVYEggVSMIIFTjCCBUoGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRl7KAO2Y5ZolA3Si0i+pNdXpn42AIDAMNQBIIEyE9fBFRMMy358/KJQcAD9Ts0Xs0TR0UEl/an+IaNTz/9doU6Es6P22roJUK8j4l09I8ptGGKYdeGzrVBzWEjPhGAZ3EXZPHi2Sr/QKbaiWUnYvqqbPVoWNLukrPvK5NpEyPO2ulfxXN46wHzQMnk5l+BjR4wzqKquxgSzacXRJCqznVj59shjLoTK9FtJ3KVEl+JfukcAh/3EqkP7PRAXrPeQ5UcvYbYMZgxw8xHYg/sdKqyHBxwQqNtvGlfGHQ6jyb4/CS2vu0ZehGHQoMgmry2pvNMjA9ypSVWRGspcrdcQOJNgYtHmBiBScoURLB+9KJX2ivY8zJFI5e8Hb48sLASkp4HQemBWMQTukSnlgddsAtIKgpoRZWpcJ7PunHuWXAKZPCMH6uF14G71/lhluRjjy5GEnkKhKkKnlX15kmLmylTZJVdMbMRnsGK7exsVS8ot7sYJ9EMIvKJUqKf/RmZvUxZqlGp1oy3Uo5JgBU5MF61wnkad+L1UJsB2ZzPV0S/jYKPFVzBsWXj9IH74D02TcQz774+FQqAXlVLlpglmlnMwOU3IboKOH2Z4LIj7Kx7wfZZMi3/sQbYJM2PWCd8OS/keDf53ZwMKNxWPh1ZB7kX4mqhmMHdNgRblcWXP3LtWKck31Vq1UdGfK4/T/nudD1ve15NPUP1DvcVsDOWnRF4s3IDXZwXWqvag+hz0zVyB/T0X1XkqrPtBNX/o5qeTDP30W2GVdGL6SIlgZHaqqNuamHlhGra43ExKTwRPBsskTrziC2fb/JeqXxJBES/YufiomXw14BnQUpyBfVeV3cDDEZUnfu7lJz19jS+2aTtA6v9Qnps+q0rNnLa54JLf9bWlw4RomSWcJCqkkW/EG0AdTKrqNFYPZVZTLvt+4B8ehWrUWas8MK5jAXeTklr0ao5acGOKWip1wmqIRKRAIT2OBbs9jCmigb2xJNDK4RdUtDYsJeltJ69DvnG7bmTLjfsOQcVIaI40k91N8nnda9+/6BdKFDQtMDB6efGkciWp9ce24uGUzKszD7CmKTlCJiqn/V2bbOKGdk4Tafy4B2HzeaX+fMFjpWu01UMaJJrvYbAnXww1Yg2IjbwdAMTv7z8zPIJ0a+drouylUfvKKeun6BnLe0fR+XbRRs77Rengb30c1plozEFHZjzmQ10uVQSh1wWURJnVSru6b1pyVI+KR3WZHB4vgDx+BDlQjxCk53+Hxm5wv8SgpvNxVkepPVF8ucut9FkGNHov1gyatlEKSzYlrFt0mFQWg20rKMrkB6pEDO8f5W2InR3znO15NTbw/l3BXYGOe1lS0tHljc5zJkmMTdVrJnFEd2RqNPNmFWEn+1bm4NeAr6QEY9fiyBCMWBHEELTfHtu4iS37D1cBEKudpCszaWJiPgEeDu75+IuXa/guZdxWJj/ktDfZQJpp9ork2QScgu31l7QdGfC24C2E6kQp4UHZ3k7wXSTUt61bdmK7BHqjiz3HuP76phzd7nZxwLCpEg8fhtwhNgPx3IrU1B4JX40Wzsy1Tz/8oIcvjykDmI967chWtw/WSschamGBelNt+TV1gVKoLlMpL9QxFcAqXhEC6Nr9nXRZRJAIRun3Vj+EabZoR2YsdghDE9boTE8MBcGCSqGSIb3DQEJFDEKHggAaAB0AHQAcDAhBgkqhkiG9w0BCRUxFAQSVGltZSAxNjUzOTcyMDczODY4MIIHnAYJKoZIhvcNAQcGoIIHjTCCB4kCAQAwggeCBgkqhkiG9w0BBwEwKQYKKoZIhvcNAQwBBjAbBBRmhTM5a6OsdDd4LLR/07U/28/dqgID
AMNQgIIHSCCLUDdxl9rcX65CAYiQD1mrnoDJe+c8hWww8KI+RD1/3U8skUZ+NHjf2cjCrDQdtVZcycc37lkJ4HEU0keMdVE7I9tja81EfQclnZAUgx/zzLQqVV9qc1AcKX0pzUczLewoQZdXQHdpXh0u8Hf4xFeYM3EAGxB0mUYGwZXWSxYSdaHmxTgeftqNHF6tudt0vpPgq9Rbqp7zP8z48VUOSUkbNTXZOgNVpMgs/yKivvURdWBwJMkpOs/daeR+QbOLkhrhTtT8FjwFUlpnQ//8i7UsBBJKcEKvlrfBEDWcIGw8M6oAssoPsCGyXnsP7ZCVBDBgv941mBTJ9Z9vMoKPpr9jZzSVJrU2+DDuxkfSy1KL0vUvZm5PGSiZA72OpRZkNi8ZUbJTRKf71R+hsCtX/ZUQtMlGCX50XUEQl44cvyX32XQb2VlyGvWu0rqgEVS+QZbuWJoZBZAedhzHvnfGiIsnn2PhRyKBvALyGcWAgK0XvC26WF676g2oMk8sjBrp8saPDvMXj06XmD6746i5KC52gLiRAcwlT4zJoA0OB5jYgxXv+/GP9iXNIK578cCGpBes28b7R+hLDBCc/fMv1jMhKWPVXWJZ6VkcpUgH73uxFl43guTZzJfHI1kMF1+PbOviWPdlSj1D44ajloMJP5FXubIfYEIqV19BdU42ZXZ8ISIZYTAj9OhNCUkkTjjGH2VhFz/FjZDxdk9m/Sw+du8dg1v0+6XIMScjuutbLxxol8Dx1yfRSgZZGN+D3vi0hW1OgcpnUhVI/x48LjdWm1IA0XWOzFiJAe98BiL0roTsUk0pgyujzvLcwDFGP9hnQ0YLdCy22UsQ39hRyQzwGAVO8O49bU8sgNy75+4++8Z3pqI91hdoHyzNMSx6fJn/Qd6UcAdTF0divh17q5bZi+x3D7AQEvh5NwePD0HIqBZexT0yNTVTHragJZUetI5FZgE1cZrfchckP/Ub5jdn3e/Cvu8J/yZFAM8glJvO1D+4BZ+/MVAw3AkO7kLhGeXMXr9s9+A/uPlznoC6b9bpjj3X46bFz7dPIYC0aeya87vISA0/5VPkkUZ+U6A9nLkCIcl5XQElMjrzidFJyBmtxHXLrAu5yiWorl3KVOf9QOrKrZt1UrNihIaSIq/46jI5yBQX6LV7fUBrZKe/oMbuf6W0LliNJbKSwZi0RRHo0jBPotUiOsn1qmnh+hZp6rwi1KGOsCAPSMSGnURwoXAdTUmAyPriDjDBKjm2EiDZJ9T3XgNDHVU24SqKjsSoByrD4FcVyqFAl3w0CaSNXloZswE0UqGKoQUy6Up0ceWoeHYfA/FJyaGfkFGRkmYun+wUJZvhpoLv6bn377CziWTSc0o3nl+UZ4pTsRJOlG0FOxzWApjSd8bPIdezPxak2DM0qj6aiUocfEBMLnFn4Sjj1vVFmIGPNXiOPlJF0Ef99I5Gno3YAd4ZHBqpkeUq7+bWur+xhv5zsXs5ARK6TVOVqlMPiKRpDX7lEQoya++U6HIj6zb7arSZivM5YrZeqHFKK4gpORvpg6icApQCBniDgmNxZJFobgzvIwKTABJjoivHs4zIIw6TCjbz38GEFdzbsUuCXQo3tFWaxgiGkxtLnjYr0PTIxFdBfQ5dkRkkxLvUg7uR1uP9IcmO/8QzzyLeSA+I+teZME8QCzui6CY/lhIfjxJimawejCJx33nS9uXNibQ0my41SmXRDGVgiH6el8veIbEHU9RY+elVR6eqlemCuIHfU8QNPNbe7Gzqaaoccd2VUY3PXNHxU87DC7Nttvn99Ow5zxZ8xZUQVfLFntS9d2hgKp8gJ9lgVKzEuYCiL59wuxbNtnAb8mET0Buw24JeQew9e8DdYL2vDLhQz+IqPXKAhlf7BSpPyQTOeaba657CNmkzdiNk3RHGeTRrq4c3/nl1M+ZsPwf8WxoTcmu+W0Y7/j9nps8r+fKlNB23hOEIWZ4KN+Y4qZRKltTARhqmdjLI
hUtWh4D49eTe5sS3MqzsZJJwsEHPPOvZKvOG5UU3jXMg9R4F8CaYgx/M4ClwIIlHvcdW7R7sXke9E/qccIG3jQ5b/mgHCk3pVkAyrRWfBZqXxlfWn+cfzVALtUXWePwhN8+i3CQbjLLOgE6yH3/rBfXQQVYHwrZqoyFchDwlFF5FtF5GThnj04kvhZbq0EcF4lbiULAOiBkJong4Op287QYgq4W8szOn9F2m/4M2XNaI3X7w67GADFHs5TtPXjWx1l6kKIwMM2pcpltXblqgH087payQHx1LnCpztxcxmeoFb3owvwKWmQpV0Gh6CIKfa7hqwCsNggOcKEQWwRJtADEXzPhRYG0mPelWLQMdLLaEzUqh9HElXu3awKazlHa1HkV0nywgldm23DPCKj5Fi6hux7vl7vt8K0Q4KA8Xoys4Pw43eRi9puQM3jOJgxX8Q/MsABHHxPBa94bOsRLFUa/Td70xbHpOrCCp64M7cm6kDKAwPjAhMAkGBSsOAwIaBQAEFEi1rtKgyohIpB9yF4t2L1CpwF+ABBSDiyukmk2pIV5XfqW5AtbEC9LvtQIDAYag +kind: Secret +metadata: + creationTimestamp: null + name: es-cert + namespace: imxc diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml new file mode 100644 index 0000000..d2bff8e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..7b0bd6d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml @@ -0,0 +1,68 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml new file mode 100644 index 0000000..61a7b7f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-manager +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml new file mode 100644 index 0000000..b20900d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-manager + namespace: imxc +spec: + type: NodePort + ports: + - protocol: TCP + port: 80 + nodePort : 32090 + targetPort: 80 + selector: + app: kafka-manager diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml new file mode 100644 index 0000000..4edcf32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-manager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-manager + template: + metadata: + labels: + app: kafka-manager + spec: + containers: + - name: kafka-manager + image: {{ 
.Values.global.IMXC_IN_REGISTRY }}/kafka-manager:{{ .Values.global.KAFKA_MANAGER_VERSION }} + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 200m + memory: 1000Mi + ports: + - containerPort: 80 + env: + - name: ZK_HOSTS + value: zookeeper:2181 + command: + - ./bin/kafka-manager + - -Dhttp.port=80 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml new file mode 100644 index 0000000..b5532cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? 
-ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? 
-eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + 
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + 
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + 
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..8ffb3f8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: imxc +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: imxc +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git 
a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml new file mode 100644 index 0000000..6f67ab4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-1 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-2 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +# On-prem/워커노드 두개/브로커 두개 환경에서 발생할 수 있는 affinity 충돌때문에 주석처리 +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-kafka-cluster-3 +# labels: +# type: local +# app: kafka +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.IMXC_KAFKA_PV_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: kafka-broker +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: + # - {{ .Values.global.IMXC_KAFKA_HOST3 }} diff --git 
a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..1982584 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: imxc +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 2 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + annotations: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 6000Mi + limits: + # This limit was intentionally set low as a reminder that + 
# the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 500m + memory: 10000Mi + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "rm -rf /var/lib/kafka/data/*;kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] +# readinessProbe: +# tcpSocket: +# port: 9092 +# timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: kafka-broker + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..c2d8170 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,89 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: 
+ name: kafka-outside-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9094 + port: 32401 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + selector: + app: kafka diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml new file mode 100644 index 0000000..cb0e677 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml new file mode 100644 index 0000000..d602e29 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: postgres +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml new file mode 100644 index 0000000..95c8bda --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + namespace: imxc + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: admin + POSTGRES_PASSWORD: eorbahrhkswp diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml new file mode 100644 index 0000000..dfbd714 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml @@ -0,0 +1,38 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "{{ .Values.global.IMXC_POSTGRES_PV_PATH }}" + nodeAffinity: + required: + nodeSelectorTerms: + - 
matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + namespace: imxc + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml new file mode 100644 index 0000000..31e90a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: imxc + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + # nodePort: 5432 + selector: + app: postgres diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml new file mode 100644 index 0000000..14993e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml @@ -0,0 +1,45 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: postgres + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: postgres +{{- end }} + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.global.IMXC_IN_REGISTRY }}/postgres:{{ .Values.global.POSTGRES_VERSION }} + resources: + requests: + cpu: 100m + memory: 2000Mi + limits: + 
cpu: 300m + memory: 2000Mi + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + args: ["-c","max_connections=1000","-c","shared_buffers=512MB","-c","deadlock_timeout=5s","-c","statement_timeout=15s","-c","idle_in_transaction_session_timeout=60s"] + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml new file mode 100644 index 0000000..9972ab8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml @@ -0,0 +1,68 @@ +# Default values for postgres. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock new file mode 100644 index 0000000..21ff14f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.8.0 +digest: sha256:3e342a25057f87853e52d83e1d14e6d8727c15fd85aaae22e7594489cc129f15 +generated: "2021-08-09T15:49:41.56962208Z" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..3b08f9c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + 
category: Infrastructure +apiVersion: v2 +appVersion: 3.8.22 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source message broker software that implements the Advanced Message + Queuing Protocol (AMQP) +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.20.5 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md new file mode 100644 index 0000000..9b26b09 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md @@ -0,0 +1,566 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | ---------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.8.21-debian-10-r13` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `[]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `[]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer 
verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. 
Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.servers` | List of LDAP servers hostnames | `[]` | +| `ldap.port` | LDAP servers port | `389` | +| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` | +| `extraVolumeMounts` | Optionally specify extra list 
of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. 
Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | +| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | +| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the 
RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| ----------------------- | --------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------- | --------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.volumes` | Additional volumes without creating PVC | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.port` | Amqp port | `5672` | +| `service.portName` | Amqp service port name | `amqp` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` | +| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.distPortName` | Erlang distribution service port name | `dist` | +| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` | +| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` | +| `service.metricsPort` | RabbitMQ Prometheues metrics port | `9419` | +| `service.metricsPortName` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` | +| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` | +| `service.epmdPortName` | EPMD Discovery service port name | `epmd` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. 
Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.certManager` | Set this to true in order to add the corresponding annotations for cert-manager | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
| `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource 
should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r172` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. 
+ +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. + +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. 
An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. 
If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission, you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. 
When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. 
+ - `metrics.port` is renamed to `service.metricsPort`. + - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. 
+ +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..344c403 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.8.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.8.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..054e51f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md @@ -0,0 +1,327 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` 
Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default 
(dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..ae45d5e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for policy. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..f905f20 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. 
+ - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..60b84a7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..1e5bba9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..18d9813 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (not $existingSecretValue) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..24ffa89 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,167 @@ +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }} + +To access for outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "rabbitmq.validateValues" . -}} + +{{- $requiredPassword := list -}} +{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..6b46b23 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . 
-}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
+*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers", + "ldap.port", and "ldap. user_dn_pattern" are mandatory. 
Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]="lmy-ldap-server" \ + --set ldap.port="389" \ + --set user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Altenatively, user an absolute value for the memory memory high watermark : + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not .Values.ingress.certManager) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Relay on cert-manager to create it by setting `ingress.certManager=true` + - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. 
+ Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..5ba6b72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration}} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..db74e50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..bf06b66 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml new file mode 100644 index 0000000..d0f8bdd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml @@ -0,0 +1,22 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: rabbitmq-pv + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: {{ .Values.global.RABBITMQ_PATH }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml new file mode 100644 index 0000000..c677752 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: rabbitmq-pvc + namespace: imxc + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . 
}}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . 
}}-endpoint-reader +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..4d14e4e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} 
+{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..562fde9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +secrets: + - name: {{ include "rabbitmq.fullname" . }} +{{- end }} + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..46b9040 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..45abd14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + 
value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if .Values.loadDefinition.enabled }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "yes" + - name: RABBITMQ_SECURE_PASSWORD + value: "no" + {{- else }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- end }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + - name: stomp + containerPort: 61613 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) 
| nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + postStart: + exec: + command: + - /bin/bash + - -ec + - | + until rabbitmqctl cluster_status >/dev/null; do + echo "Waiting for cluster readiness..." + sleep 5 + done + rabbitmq-queues rebalance "all" + {{- end }} + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + resources: + requests: + memory: "500Mi" + cpu: "150m" + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..4ed26cc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.portName }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..2b4c224 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.epmdNodePort))) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.distNodePort))) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.managerNodePort))) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.metricsNodePort))) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..b6a6078 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,74 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . )}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }}-certs + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "rabbitmq.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + 
"path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..5b74e6c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml @@ -0,0 +1,1151 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +## @section RabitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param 
image.registry RabbitMQ image registry +## @param image.repository RabbitMQ image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: rabbitmq + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + +## @section Common parameters + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command 
Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "eorbahrhkswp" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "pf6t82zTrqY9iaupUmkPOJxPXjmjiNEd" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer 
verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_stomp" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. 
+## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: false + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` or `advancedConfiguration` instead +## +configuration: |- + {{- if not .Values.loadDefinition.enabled -}} + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = {{ .Values.auth.password }} + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. 
+## advancedConfiguration: |- +## [{ +## rabbitmq_auth_backend_ldap, +## [{ +## ssl_options, +## [{ +## verify, verify_none +## }, { +## fail_if_no_peer_cert, +## false +## }] +## ]} +## }]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.servers List of LDAP servers hostnames + ## + servers: [] + ## @param ldap.port LDAP servers port + ## + port: "389" + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter + ## + enabled: false + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] + +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. 
+## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param 
customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + 
create: true + +## @section Persistence parameters + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "rabbitmq" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "rabbitmq-pvc" + + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 5Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + +## @section Exposure parameters + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + # type: NodePort + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## + managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheus metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheus metrics service port name + ## + metricsPortName: metrics + + ## 
@param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: + - name: stomp + port: 61613 + targetPort: 61613 + #nodePort: 31613 + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Ingress annotations + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting `ingress.certManager=true` + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## @param ingress.certManager Set this to true in order to add the corresponding annotations for cert-manager + ## to generate a TLS secret for the ingress record + ## + certManager: false + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by 
Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param 
metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Specify Metric Relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + 
additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: 10.10.31.243:5000/cmoa3 # docker.io + repository: bitnami-shell # bitnami/bitnami-shell + tag: 10-debian-10-r175 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## 
Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock new file mode 100644 index 0000000..ee0ecb7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.3.3 +digest: sha256:264db18c8d0962b5c4340840f62306f45fe8d2c1c8999dd41c0f2d62fc93a220 +generated: "2021-01-15T00:05:10.125742807Z" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml new file mode 100644 index 0000000..6924d59 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.7.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/README.md b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/README.md new file mode 100644 index 0000000..3befa8c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/README.md @@ -0,0 +1,707 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. +While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable 
NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | Additional 
Environment Variables Secret passed to the master's stateful set | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | +| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the slave's stateful set set | `[]` | +| `masslaveter.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | +| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` | 
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} | +| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| 
`sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.preExecCmds` | Text to inset into the startup script immediately prior to `sentinel.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change RedisTM version + +To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - RedisTM Master service: Points to the master, where read-write operations can be performed + - RedisTM Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed: + + - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar): + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for RedisTM you need to create a secret containing the password. + +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. +- `tls.certCAFilename`: CA Certificate filename. No defaults. 
+ +For example: + +First, create the secret with the certificates files: + +```console +kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +``` + +Then, use the following parameters: + +```console +tls.enabled="true" +tls.certificatesSecret="certificates-tls-secret" +tls.certFilename="cert.pem" +tls.certKeyFilename="cert.key" +tls.certCAFilename="ca.pem" +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS options to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example: + +You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or provide the following values under `metrics.extraArgs` for TLS client authentication: + +```console +tls-client-key-file +tls-client-cert-file +tls-ca-cert-file +``` + +### Host Kernel Settings + +RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. 
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: + +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis +``` + +## Backup and restore + +### Backup + +To perform a backup you will need to connect to one of the nodes and execute: + +```bash +$ kubectl exec -it my-redis-master-0 bash + +$ redis-cli +127.0.0.1:6379> auth your_current_redis_password +OK +127.0.0.1:6379> save +OK +``` + +Then you will need to get the created dump file from the redis node: + +```bash +$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis +``` + +### Restore + +To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. + +Follow these steps: + +- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step. 
+ +```yaml +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly no + # Disable RDB persistence, AOF persistence already enabled. + save "" +``` + +- Start the new cluster to create the PVCs. + +For example, : + +```bash +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +- Now that the PVC were created, stop it and copy the `dump.rdp` on the persisted data by using a helping pod. + +``` +$ helm delete new-redis + +$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides=' +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "redisvolpod" + }, + "spec": { + "containers": [{ + "command": [ + "tail", + "-f", + "/dev/null" + ], + "image": "bitnami/minideb", + "name": "mycontainer", + "volumeMounts": [{ + "mountPath": "/mnt", + "name": "redisdata" + }] + }], + "restartPolicy": "Never", + "volumes": [{ + "name": "redisdata", + "persistentVolumeClaim": { + "claimName": "redis-data-new-redis-master-0" + } + }] + } +}' --image="bitnami/minideb" + +$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb +$ kubectl delete pod volpod +``` + +- Start again the cluster: + +``` +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +## NetworkPolicy + +To enable network policy for RedisTM, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to RedisTM. 
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 11.0.0 + +When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml` + +### To 9.0.0 + +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the RedisTM exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### To 7.0.0 + +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. 
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..ceb5648 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.3.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.3.3 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md new file mode 100644 index 0000000..461fdc9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md @@ -0,0 +1,316 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. 
If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| 
`common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} 
+ topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..d95b569 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define 
"common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..622ef50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4931d94 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..77bcc2b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a786188 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..3e2a47c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..fb2fe60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..7efeda3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000 # 
docker.io + repository: redis # bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-sentinel # bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-exporter # bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..a254f58 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + 
+------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.imxc.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . 
}}-slave.imxc.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace imxc {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace imxc /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace imxc /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace imxc /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace imxc -- bash +{{- else }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. 
Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. 
+{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . 
}} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..193105d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl @@ -0,0 +1,421 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..02411c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. 
The liveness check will then timeout waiting for the redis + # container to be ready and restart it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel + # container to be ready and restart it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_replica() { + if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}" + fi + } + + {{- if .Values.sentinel.staticID }} + # remove generated known sentinels and replicas + tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf + + for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do + NAME="{{ template "redis.fullname" . 
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..923272c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..7db7371 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..0bbbfb6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..928f9a8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} +# {{- if .Values.metrics.serviceMonitor.namespace }} +# namespace: {{ .Values.metrics.serviceMonitor.namespace }} +# {{- else }} + namespace: imxc +# {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - imxc +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..4dae3bc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..ae27ebb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..e2ad471 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template 
"redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..fba6450 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: imxc + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..f3c9390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..78aa2e6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,378 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) 
}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..56ba5f1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..5d697de --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,494 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ 
.Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml new file mode 100644 index 0000000..adb5416 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-master +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: 
redis-data-redis-master-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-0 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-1 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-1 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..0d14129 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..83c87f5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..9452003 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..be0894b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,384 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.imxc.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml 
.Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..c1f3ae5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..3b3458e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..c1103d2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + 
"type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } 
+ } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml new file mode 100644 index 0000000..fcd8710 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml @@ -0,0 +1,932 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: latest + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +fullnameOverride: redis + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + #enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + #registry: docker.io + registry: 10.10.31.243:5000 + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.10-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + #enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "dkagh1234!" 
+## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + # type: NodePort + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + 
externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31379 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + #type: NodePort + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31380 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false +# enabled: true + + image: + registry: 10.10.31.243:5000 # registry.cloud.intermax:5000 + repository: redis/redis-exporter + #tag: 1.15.1-debian-10-r2 + tag: latest + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..3b23a9e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=1 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + 
server.2=zookeeper-1.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..422433a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: imxc +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..9fdcf95 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: imxc +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git 
a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml new file mode 100644 index 0000000..2a909f7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-1 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-2 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-3 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git 
a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..a9e5cb8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: imxc +spec: + selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 200m + memory: 500Mi + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election +# readinessProbe: +# exec: +# command: +# - /bin/sh +# - -c +# - '[ "imok" = "$(echo ruok | nc -w 1 -q 1 127.0.0.1 2181)" ]' + volumeMounts: + - name: config + mountPath: /etc/kafka + - 
name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: zookeeper-storage + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml new file mode 100644 index 0000000..e08ed54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml @@ -0,0 +1,50 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-2 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml new file mode 100644 index 0000000..7b06985 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for zookeeper. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/index.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/templates/role.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/templates/role.yaml new file mode 100644 index 0000000..28f0e32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: imxc-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: default + namespace: imxc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/ansible/01_old/roles/cmoa_install/files/02-base/base/values.yaml b/ansible/01_old/roles/cmoa_install/files/02-base/base/values.yaml new file mode 100644 index 0000000..e2ad288 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/02-base/base/values.yaml @@ -0,0 +1,73 @@ +global: + # cluster variables + CLUSTER_ID: cloudmoa + + # default storageClass + DEFAULT_STORAGE_CLASS: exem-local-storage + + # nodeAffinity + affinity_key: cmoa + affinity_value1: worker1 + affinity_value2: worker2 + affinity_value3: worker2 + + # postgres variables + IMXC_POSTGRES_PV_PATH: /media/data/postgres/postgres-data-0 + + #elastic variables + ELASTICSEARCH_PATH1: /media/data/elasticsearch/elasticsearch-data-0 + ELASTICSEARCH_PATH2: 
/media/data/elasticsearch/elasticsearch-data-1 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + # zookeeper variables + IMXC_ZOOKEEPER_PATH1: /media/data/zookeeper/zookeeper-data-0 + IMXC_ZOOKEEPER_PATH2: /media/data/zookeeper/zookeeper-data-1 + IMXC_ZOOKEEPER_PATH3: /media/data/zookeeper/zookeeper-data-2 + + # kafka variables + IMXC_KAFKA_PV_PATH1: /media/data/kafka/kafka-data-0 + IMXC_KAFKA_PV_PATH2: /media/data/kafka/kafka-data-1 + IMXC_KAFKA_PV_PATH3: /media/data/kafka/kafka-data-2 + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # cortex variables + IMXC_INGESTER_PV_PATH1: /media/cloudmoa/ingester/ingester-data-1 + IMXC_INGESTER_PV_PATH2: /media/cloudmoa/ingester/ingester-data-2 + IMXC_INGESTER_PV_PATH3: /media/cloudmoa/ingester/ingester-data-3 + + # redis variables + IMXC_REDIS_PV_PATH1: /media/data/redis/redis-data-0 + IMXC_REDIS_PV_PATH2: /media/data/redis/redis-data-1 + IMXC_REDIS_PV_PATH3: /media/data/redis/redis-data-2 + + # rabbitmq variables + RABBITMQ_PATH: /media/data/rabbitmq + + # custom or etc variables + # IMXC_WORKER_NODE_NAME: $IMXC_WORKER_NODE_NAME # deprecated 2021.10.21 + # IMXC_MASTER_IP: 10.10.30.202 + IMXC_API_SERVER_DNS: imxc-api-service + + METRIC_ANALYZER_MASTER_VERSION: rel0.0.0 + METRIC_ANALYZER_WORKER_VERSION: rel0.0.0 + ELASTICSEARCH_VERSION: v1.0.0 + KAFKA_MANAGER_VERSION: v1.0.0 + KAFKA_INITUTILS_VERSION: v1.0.0 + #KAFKA_VERSION: v1.0.0 + KAFKA_VERSION: v1.0.1 + METRICS_SERVER_VERSION: v1.0.0 + POSTGRES_VERSION: v1.0.0 + CASSANDRA_VERSION: v1.0.0 + RABBITMQ_VERSION: v1.0.0 + CORTEX_VERSION: v1.11.0 #v1.9.0 + #CONSUL_VERSION: 0.7.1 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + rabbitmq: + image: + registry: 10.10.31.243:5000/cmoa3 # {{ .Values.global.IMXC_REGISTRY }} + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh 
b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh new file mode 100755 index 0000000..4079243 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh @@ -0,0 +1,3085 @@ +#!/bin/bash + +kubectl -n imxc wait --for=condition=ready pod/elasticsearch-1 --timeout=600s + +namespace=$1 +export ES_NODEPORT=`kubectl -n ${namespace} get svc elasticsearch -o jsonpath='{.spec.ports[*].nodePort}'` + +export MASTER_IP=`kubectl get node -o wide | grep control-plane | awk '{print $6}'` + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SECURE=true + +if [ $SECURE = true ] +then +PARAM="-u elastic:elastic --insecure" +PROTO="https" +else +PARAM="" +PROTO="http" +fi + +echo Secure=$SECURE +echo Param=$PARAM +echo Proto=$PROTO + +curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices + +echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices" + +# kubernetes_cluster_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_info" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "date": { + "type": "long" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + } +}' + +# kubernetes_cluster_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_history' -H 
'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cluster_history" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_cluster_history": {} + } +}' + +# kubernetes_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_info" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "id": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_info": {} + } +}' + + + +# kubernetes_event_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + 
"index_patterns": [ + "kubernetes_event_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_event_info" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_event_info": {} + } +}' + + + + +# kubernetes_job_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_job_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_job_info" + } + } + }, + 
"mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "commandlist": { + "type": "text", + "index": false + }, + "labellist": { + "type": "text", + "index": false + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_job_info": {} + } +}' + + + +# kubernetes_cronjob_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cronjob_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cronjob_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "jobname": { + "type": "keyword" + }, + "kind": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "lastruntime": { + "type": "long" + }, + "arguments": { + "type": "text", + "index": false + }, + "schedule": { + "type": "keyword" + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_cronjob_info": {} + } +}' + + + + +# kubernetes_network_connectivity +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_network_connectivity-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_network_connectivity" + } + } + }, + "mappings": { + "properties": { + "timestamp": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "container": { + "type": "keyword" + }, + "pid": { + "type": "integer" + }, + "peerNode": { + "type": "keyword" + }, + "peerNamespace": { + "type": "keyword" + }, + "peerService": { + "type": "keyword" + }, + "peerPod": { + "type": "keyword" + }, + "peerContainer": { + "type": "keyword" + }, + "peerPid": { + "type": "integer" + } + } + }, + "aliases": { + "kubernetes_network_connectivity": {} + } +}' + + + +# sparse_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sparse_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": 
"sparse_log" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "date": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "logpath": { + "type": "text", + "index": false + }, + "contents": { + "type": "text" + }, + "lineNumber": { + "type": "integer" + }, + "probability": { + "type": "float" + }, + "subentityId": { + "type": "keyword" + } + } + }, + "aliases": { + "sparse_log": {} + } +}' + + + +# sparse_model +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_model' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_model" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "modifiedDate": { + "type": "long" + }, + "logPath": { + "type": "keyword" + }, + "savedModel": { + "type": "text", + "index": false + } + } + } +}' + + + +# kubernetes_pod_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ +"order": 0, + "index_patterns": [ + "kubernetes_pod_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_info" + } + } + }, + 
"mappings": { + "properties": { + "eventType": {"type": "keyword"}, + "cluster": {"type": "keyword"}, + "namespace": {"type": "keyword"}, + "node": {"type": "keyword"}, + "pod": {"type": "keyword"}, + "podUID": {"type": "keyword"}, + "podCreationTimestamp": {"type": "long"}, + "podDeletionTimestamp": {"type": "long"}, + "podDeletionGracePeriod": {"type": "long"}, + "resourceVersion": {"type": "keyword"}, + "ownerKind": {"type": "keyword"}, + "ownerName": {"type": "keyword"}, + "ownerUID": {"type": "keyword"}, + "podPhase": {"type": "keyword"}, + "podIP": {"type": "keyword"}, + "podStartTime": {"type": "long"}, + "podReady": {"type": "boolean"}, + "podContainersReady": {"type": "boolean"}, + "isInitContainer": {"type": "boolean"}, + "containerName": {"type": "keyword"}, + "containerID": {"type": "keyword"}, + "containerImage": {"type": "keyword"}, + "containerImageShort": {"type": "keyword"}, + "containerReady": {"type": "boolean"}, + "containerRestartCount": {"type": "integer"}, + "containerState": {"type": "keyword"}, + "containerStartTime": {"type": "long"}, + "containerMessage": {"type": "keyword"}, + "containerReason": {"type": "keyword"}, + "containerFinishTime": {"type": "long"}, + "containerExitCode": {"type": "integer"}, + "containerLastState": {"type": "keyword"}, + "containerLastStartTime": {"type": "long"}, + "containerLastMessage": {"type": "keyword"}, + "containerLastReason": {"type": "keyword"}, + "containerLastFinishTime": {"type": "long"}, + "containerLastExitCode": {"type": "integer"} + } + }, + "aliases": { + "kubernetes_pod_info": {} + } +}' + + + +# kubernetes_pod_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_history' -H 'Content-Type: 
application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_history" + } + } + }, + "mappings": { + "properties": { + "deployName": { + "type": "keyword" + }, + "deployType": { + "type": "keyword" + }, + "deployDate": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "podPhase": { + "type": "keyword" + }, + "startTime": { + "type": "keyword" + }, + "endTime": { + "type": "keyword" + }, + "exitCode": { + "type": "integer" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "time": { + "type": "long" + }, + "containerId": { + "type": "keyword" + }, + "containerName": { + "type": "keyword" + }, + "containerPhase": { + "type": "keyword" + }, + "eventAction": { + "type": "keyword" + }, + "containerStartTime": { + "type": "keyword" + }, + "containerEndTime": { + "type": "keyword" + }, + "containerImage": { + "type": "keyword" + }, + "containerImageShort": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_pod_history": {} + } +}' + + + + +# metric_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/metric_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/metric_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "metric_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "metric_score" + }, + "sort.field": "unixtime", + 
"sort.order": "desc" + } + }, + "mappings": { + "properties": { + "anomaly": { + "type": "boolean" + }, + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "instance": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "metricId": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "score": { + "type": "integer" + }, + "subKey": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "yhatLowerUpper": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "aliases": { + "metric_score": {} + } +}' + + + + +# entity_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/entity_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/entity_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "entity_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "entity_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "nodeId": { + "type": "keyword" + }, + "maxId": { + "type": "keyword" + }, + "maxScore": { + "type": "integer" + }, + "entityScore": { + "type": "integer" + } + } + }, + "aliases": { + "entity_score": {} 
+ } +}' + + +# timeline_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/timeline_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/timeline_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "timeline_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "timeline_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "criticalCount": { + "type": "integer" + }, + "warningCount": { + "type": "integer" + }, + "attentionCount": { + "type": "integer" + }, + "normalCount": { + "type": "integer" + }, + "unixtime": { + "type": "long" + } + } + }, + "aliases": { + "timeline_score": {} + } +}' + + + +# spaninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/spaninfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/spaninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "spaninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "spaninfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + 
"type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "spanId": { + "type": "keyword" + }, + "parentSpanId": { + "type": "keyword" + }, + "protocolType": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "operation": { + "type": "keyword" + }, + "spanKind": { + "type": "keyword" + }, + "component": { + "type": "keyword" + }, + "error": { + "type": "boolean" + }, + "peerAddress": { + "type": "keyword" + }, + "peerHostname": { + "type": "keyword" + }, + "peerIpv4": { + "type": "keyword" + }, + "peerIpv6": { + "type": "keyword" + }, + "peerPort": { + "type": "integer" + }, + "peerService": { + "type": "keyword" + }, + "samplingPriority": { + "type": "keyword" + }, + "httpStatusCode": { + "type": "integer" + }, + "httpUrl": { + "type": "keyword" + }, + "httpMethod": { + "type": "keyword" + }, + "httpApi": { + "type": "keyword" + }, + "dbInstance": { + "type": "keyword" + }, + "dbStatement": { + "type": "keyword" + }, + "dbType": { + "type": "keyword" + }, + "dbUser": { + "type": "keyword" + }, + "messagebusDestination": { + "type": "keyword" + }, + "logs": { + "dynamic": false, + "type": "nested", + "properties": { + "fields": { + "dynamic": false, + "type": "nested", + "properties": { + "value": { + "ignore_above": 256, + "type": "keyword" + }, + "key": { + "type": "keyword" + } + } + }, + "timestamp": { + "type": "long" + } + } + } + } + }, + "aliases": { + "spaninfo": {} + } +}' + + + +# sta_podinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + 
"index_patterns": [ + "sta_podinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_podinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "version": { + "type": "keyword" + }, + "components": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "aliases": { + "sta_podinfo": {} + } +}' + + +# sta_httpapi +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpapi-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpapi" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "api": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_httpapi": {} + } +}' + + + +# sta_httpsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + 
"index_patterns": [ + "sta_httpsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpsummary" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "pod": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "api": { + "type": "keyword" + }, + "countTotal": { + "type": "integer" + }, + "errorCountTotal": { + "type": "integer" + }, + "timeTotalMicrosec": { + "type": "integer" + }, + "methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_httpsummary": {} + } +}' + + + +# sta_relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_relation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_relation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "parent": { + "type": "keyword" + }, + "children": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_relation": {} + } +}' + + + +# sta_externalrelation +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_externalrelation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "externalNamespace": { + "type": "keyword" + }, + "externalService": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_externalrelation": {} + } +}' + + + +# sta_traceinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_traceinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_traceinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "operationName": { + "type": "keyword" + }, + "spanSize": { + "type": "integer" + }, + "relatedServices": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + 
"startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "error": { + "type": "boolean" + } + } + }, + "aliases": { + "sta_traceinfo": {} + } +}' + + + +# sta_tracetrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_tracetrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_tracetrend" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": {"type": "integer"} + } + }, + { + "errors": { + "match": "error*", + "mapping": {"type": "integer"} + } + } + ] + }, + "aliases": { + "sta_tracetrend": {} + } +}' + +# script_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/script_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + + + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/script_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "script_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": 
"script_history" + } + } + }, + "mappings": { + "properties": { + "taskId": { + "type": "long" + }, + "scriptName": { + "type": "keyword" + }, + "agentName": { + "type": "keyword" + }, + "targetFile": { + "type": "keyword" + }, + "args": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "validCmd": { + "type": "keyword" + }, + "validVal": { + "type": "keyword" + }, + "valid": { + "type": "boolean" + }, + "validResult": { + "type": "keyword" + }, + "cronExp": { + "type": "keyword" + }, + "createUser": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "error": { + "type": "boolean" + }, + "result": { + "type": "keyword" + }, + "order": { + "type": "keyword" + }, + "mtime": { + "type": "keyword" + } + } + }, + "aliases": { + "script_history": {} + } +}' + + +# kubernetes_audit_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_audit_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_audit_log" + }, + "sort.field": "stageTimestamp", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "verb": { + "type": "keyword" + }, + "userName": { + "type": "keyword" + }, + "sourceIps": { + "type": "keyword" + }, + "resource": { + "type": "keyword" + }, + "code": { + "type": "keyword" + }, + "requestReceivedTimestamp": { + "type": "long" + }, + "stageTimestamp": { + "type": "long" + }, + "durationTimestamp": { + "type": "long" + }, + "data": { + "type": 
"text", + "index": false + } + } + }, + "aliases": { + "kubernetes_audit_log": {} + } +}' + +# license_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/license_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/license_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "license_history-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "license_history" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "license_history": {} + } +}' + +# alert_event_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/alert_event_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/alert_event_history' -H 'Content-Type: application/json' -d 
'{ + "order": 0, + "index_patterns": [ + "alert_event_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "alert_event_history" + } + } + }, + "mappings": { + "properties": { + "alertName": { + "type": "keyword" + }, + "clusterId": { + "type": "keyword" + }, + "data": { + "type": "text", + "index": false + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "level": { + "type": "keyword" + }, + "metaId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "startsAt": { + "type": "long" + }, + "threshold": { + "type": "double" + }, + "value": { + "type": "double" + }, + "message": { + "type": "keyword" + }, + "endsAt": { + "type": "long" + }, + "status": { + "type": "keyword" + }, + "hookCollectAt": { + "type": "long" + } + } + }, + "aliases": { + "alert_event_history": {} + } +}' + +# JSPD ilm +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/jspd_ilm' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +# jspd_lite-activetxn +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-activetxn' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-activetxn-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + 
"type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "cpu_time": { + "type": "integer" + }, + "memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_exec_count": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "active_sql_elapse_time": { + "type": "integer" + }, + "db_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "thread_id": { + "type": "long" + }, + "state": { + "type": "short" + }, + "method_id": { + "type": "integer" + }, + "method_seq": { + "type": "integer" + }, + "stack_crc": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-activetxn": {} + } +}' + +# jspd_lite-alert +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-alert' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-alert-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "status": { + "type": "short" + }, + "value": { + "type": "integer" + }, + "pid": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-alert": {} + } +}' + +# jspd_lite-e2einfo +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-e2einfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-e2einfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "root_tid": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "e2e_info_type": { + "type": "short" + }, + "e2e_key": { + "type": "keyword" + }, + "elapse_time": { + "type": "integer" + }, + "dest_url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-e2einfo": {} + } +}' + +# jspd_lite-methodname +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-methodname' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-methodname-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "method_id": { + "type": "integer" + }, + "class_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "method_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": 
"keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-methodname": {} + } +}' + +# jspd_lite-sqldbinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-sqldbinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-sqldbinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-sqldbinfo": {} + } +}' + +# jspd_lite-txninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "end_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "keyword" + }, + "client_ip": { + "type": "keyword" + }, + "exception": { + "type": "short" + }, + "thread_cpu_time": { + "type": "integer" + }, + "thread_memory_usage": { + 
"type": "integer" + }, + "web_id": { + "type": "integer" + }, + "open_conn": { + "type": "integer" + }, + "close_conn": { + "type": "integer" + }, + "open_stmt": { + "type": "integer" + }, + "close_stmt": { + "type": "integer" + }, + "open_rs": { + "type": "integer" + }, + "close_rs": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_execute_count": { + "type": "integer" + }, + "sql_elapse_time": { + "type": "integer" + }, + "sql_elapse_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + }, + "txn_flag": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + }, + "http_status": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "aliases": { + "jspd_lite-txninfo": {} + } +}' + +# jspd_lite-txnmethod +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnmethod' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnmethod-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "method_seq": { + "type": "integer" + }, + "method_id": { + "type": "integer" + }, + "calling_method_id": { + "type": "integer" + }, + "stack_crc32": { + "type": "integer" + }, + "calling_stack_crc32": { + "type": "integer" + }, + "elapse_time": { + "type": "integer" + }, + "exec_count": { + "type": "integer" + }, + "error_count": { + "type": "integer" + 
}, + "cpu_time": { + "type": "integer" + }, + "memory": { + "type": "integer" + }, + "start_time": { + "type": "long" + }, + "method_depth": { + "type": "integer" + }, + "exception": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 32768, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-txnmethod": {} + } +}' + +# jspd_lite-txnsql +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnsql' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnsql-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "cursor_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "method_id": { + "type": "integer" + }, + "execute_count": { + "type": "integer" + }, + "elapsed_time": { + "type": "integer" + }, + "elapsed_time_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "fetch_time_max": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-txnsql": {} + } +}' + +# jspd_lite-wasstat +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-wasstat' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-wasstat-*" + ], + "settings": { + "index": { + 
"number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "active_txns": { + "type": "integer" + }, + "sql_exec_count": { + "type": "long" + }, + "sql_prepare_count": { + "type": "long" + }, + "sql_fetch_count": { + "type": "long" + }, + "txn_end_count": { + "type": "long" + }, + "open_file_count": { + "type": "integer" + }, + "close_file_count": { + "type": "integer" + }, + "open_socket_count": { + "type": "integer" + }, + "close_socket_count": { + "type": "integer" + }, + "txn_elapse": { + "type": "long" + }, + "sql_elapse": { + "type": "long" + }, + "txn_elapse_max": { + "type": "long" + }, + "sql_elapse_max": { + "type": "long" + }, + "txn_error_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-wasstat": {} + } +}' + +# jspd_tta-externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "external_namespace": { + "type": "keyword" + }, + "external_service": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-externalrelation": {} + } +}' + +# jspd_tta-relation +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "from_service": { + "type": "keyword" + }, + "to_service": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_tta-relation": {} + } +}' + +# jspd_tta-txnlist +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnlist' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnlist-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-txnlist": {} + } +}' + +# jspd_tta-txnsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + 
}, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + }, + "req_count": { + "type": "integer" + }, + "resp_count": { + "type": "integer" + }, + "total_duration": { + "type": "long" + }, + "failed": { + "type": "integer" + }, + "http_methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "http_statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "jspd_tta-txnsummary": {} + } +}' + +# jspd_tta-txntrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txntrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txntrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": { + "type": "integer" + } + } + }, + { + "errors": { + "match": "error*", + "mapping": { + "type": "integer" + } + } + } + ] + }, + "aliases": { + "jspd_tta-txntrend": {} + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "5d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/maximum_metrics' -H 'Content-Type: 
#!/bin/bash
# Step 1 of the es-reindex patch (3.2.0-rel332): recreate the
# kubernetes_event_info index template with an ngram analyzer on "message",
# plus an identical *_backup template used by the companion scripts (steps 2-4).

# Elasticsearch NodePort, cut out of the PORT(S) column
# (e.g. "9200:31234/TCP" -> "31234").
temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk '{print $5}')
export ES_NODEPORT=${temp:5:(-4)}
echo "$ES_NODEPORT"

# Internal IP of the master node. head -n1 guards against multi-master
# clusters, where the unfiltered output is multi-line and corrupts the URLs.
export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk '{print $6}' | head -n 1)
echo "$MASTER_IP"

export NUM_SHARDS=2
export NUM_REPLICAS=1

SOURCE_INDEX='kubernetes_event_info'
DEST_INDEX='kubernetes_event_info_backup'

ES_URL="http://${MASTER_IP}:${ES_NODEPORT}"

# Create the 7-day-delete ILM policy and the index template for one index
# name. The source and backup templates were identical apart from the name,
# so the ~90-line payload is defined once here instead of duplicated.
create_policy_and_template() {
    local index_name=$1

    curl -X PUT "${ES_URL}/_ilm/policy/${index_name}" -H 'Content-Type: application/json' -d '{
        "policy": {
            "phases": {
                "delete": {
                    "min_age": "7d",
                    "actions": {
                        "delete": {}
                    }
                }
            }
        }
    }'

    # Unquoted heredoc so ${index_name}/${NUM_SHARDS}/${NUM_REPLICAS} expand;
    # --data-binary keeps the JSON intact (plain -d would strip newlines).
    curl -X PUT "${ES_URL}/_template/${index_name}" -H 'Content-Type: application/json' --data-binary @- <<EOF
{
    "order": 0,
    "index_patterns": [
        "${index_name}-*"
    ],
    "settings": {
        "index": {
            "number_of_shards": ${NUM_SHARDS},
            "number_of_replicas": ${NUM_REPLICAS},
            "refresh_interval": "1s",
            "lifecycle": {
                "name": "${index_name}"
            }
        },
        "analysis": {
            "analyzer": {
                "my_customer_ngram_analyzer": {
                    "tokenizer": "my_customer_ngram_tokenizer"
                }
            },
            "tokenizer": {
                "my_customer_ngram_tokenizer": {
                    "type": "ngram",
                    "min_gram": "2",
                    "max_gram": "3"
                }
            }
        }
    },
    "mappings": {
        "properties": {
            "cluster": { "type": "keyword" },
            "namespace": { "type": "keyword" },
            "type": { "type": "keyword" },
            "unixtime": { "type": "long" },
            "kind": { "type": "keyword" },
            "name": { "type": "keyword" },
            "firsttime": { "type": "long" },
            "lasttime": { "type": "long" },
            "data": { "type": "text", "index": false },
            "id": { "type": "keyword" },
            "reason": { "type": "keyword" },
            "message": {
                "type": "text",
                "fields": {
                    "ngram": {
                        "type": "text",
                        "analyzer": "my_customer_ngram_analyzer"
                    }
                }
            },
            "count": { "type": "integer" },
            "sourceComponent": { "type": "keyword" },
            "sourceHost": { "type": "keyword" }
        }
    },
    "aliases": {
        "${index_name}": {}
    }
}
EOF
}

# Remap the live index, then create the backup template with the same mapping
# so documents reindexed into the backup keep their field types.
create_policy_and_template "${SOURCE_INDEX}"
create_policy_and_template "${DEST_INDEX}"
line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git 
a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export 
NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + 
"order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 
백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": 
"7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + 
"number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo 
$MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep 
elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + 
"delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export 
MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + 
+ curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 
1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 
'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, 
+ "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl 
get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk 
{'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + 
"actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | 
awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 
기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql new file mode 100644 index 0000000..7ed34ad --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql @@ -0,0 +1,803 @@ +UPDATE public.metric_meta2 SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)' WHERE id = 'container_memory_usage_by_workload'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP' WHERE id = 7; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: 
Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' WHERE id = 4; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + 
verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + 
apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: 
log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: 
__address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + 
replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + 
value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 6; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: 
[__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: 
labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: 
$COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 3; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql new file mode 100644 index 0000000..6b63e62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql @@ -0,0 +1,919 @@ + +-- from diff + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config +( + id bigint not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + + + +alter table tenant_info + add delete_scheduler_date 
timestamp; + +alter table tenant_info + add tenant_init_clusters varchar(255); + +alter table cloud_user + add dormancy_date timestamp; + +alter table cloud_user + add status varchar(255) default 'use'::character varying not null; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check|Check Script'; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check'; + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +UPDATE public.menu_meta +SET position = 10::integer +WHERE id = 80::bigint; + +UPDATE public.menu_meta +SET position = 99::integer +WHERE id = 90::bigint; + + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n 
repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: 
${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"', true); + + + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name 
generation method by TRX_NAME_TYPE (parameter(1), param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect 
transactions that are greater than or equal to the option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 
'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO 
jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); + + +---public.metric_meta2 +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}} + node_memory_SReclaimable_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024'::text WHERE id LIKE 'node#_memory#_used' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100'::text WHERE id LIKE 'host#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = 'sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes 
{fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))'::text WHERE id LIKE 'host#_fs#_total#_by#_mountpoint' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100'::text WHERE id LIKE 'cluster#_memory#_usage' ESCAPE '#'; + + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - (node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}} + node_memory_SReclaimable_bytes{xm_entity_type=''Node'', {filter}})) >= 0 or (node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}})) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} * 100'::text WHERE id LIKE 'node#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})'::text WHERE id LIKE 'host#_memory#_used' ESCAPE '#'; + + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_error_rate', 'Service Pod 
Transaction Error Rate', 'The number of transaction error rate for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.', '2022-02-15 18:08:58.18', '2022-02-15 18:08:58.18'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_rate', 'Service Transaction Error Rate', 'Service Transaction Error Rate', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.', '2022-02-15 14:33:00.118', '2022-02-15 15:40:17.64'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_elapsed_time_avg', 'Service Transaction Elapsed Time (avg)', 'Service Average Elapsed Time', 'sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, 
xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2021-11-15 16:09:34.233', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_elapsed_time_avg', 'Service Pod Transaction Elapsed Time (avg)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Pod Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2022-02-15 18:04:55.228', '2022-02-15 18:04:55.228'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_count', 'Service Transaction Error Count', 'Service Transaction Error Count', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) ', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Error Request count:{{humanize $value}}%|{threshold}%.', '2021-11-15 16:10:31.352', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, 
anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_per_sec', 'Service Transaction Count (per Second)', 'Service Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Svc Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2021-11-15 16:11:19.606', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_per_sec', 'Service Pod Transaction Count (per sec)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-02-15 17:59:39.45', '2022-02-15 17:59:39.45'); + + + +-- Auto-generated SQL script #202202221030 +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_system_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE 
id='container_cpu_system_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_user_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_user_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
(container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_limit_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_reads_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or 
(container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)' + WHERE id='container_fs_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_writes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_cache_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_max_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_swap_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100)' + WHERE id='container_memory_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
(container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_working_set_bytes_by_workload'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_active_txn_per_sec', 'Service Active Transaction Count (per Second)', 'Service Active Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:51:45.946', '2022-03-11 15:51:45.946') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))' +WHERE id = 'imxc_jspd_active_txn_per_sec'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_pod_active_txn_per_sec', 'Service Pod Active Transaction Count (per sec)', 'The number of active transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:53:29.252', '2022-03-11 15:53:29.252') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))' +WHERE id = 'imxc_jspd_pod_active_txn_per_sec'; + + 
+--public.agent_install_file_info + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + 
- list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 
+ fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + 
tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: 
$DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql new file mode 100644 index 0000000..e84e9be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql @@ -0,0 +1,459 @@ + UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - 
apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + 
min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent + spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: 
cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] 
+ regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: 
xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent + spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config + ' WHERE id = 6; \ No 
newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql new file mode 100644 index 0000000..0d20f2c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql @@ -0,0 +1,1379 @@ +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +-- 더존(3.3.2) 에서 누락되었던 항목 모두 추가 +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES 
('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values 
('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, 
code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'imxc-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into public.common_setting 
(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api-demo', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui-demo', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream Txntrend', 
'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +UPDATE public.agent_install_file_info +SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - 
cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + 
matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath:88888889 + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text +WHERE id = 2::bigint; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='topology_idx'; + +UPDATE public.common_setting +SET code_value='spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', + code_group='storageidx' +WHERE code_id='trace_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='event_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE 
code_id='sparse_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='anomaly_idx'; + +UPDATE public.common_setting +SET code_value='alert_event_history', + code_group='storageidx' +WHERE code_id='alert_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='audit_idx'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - 
hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + 
k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1'::text WHERE id = 5::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: 
kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + 
- batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: 
cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions 
is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: 
xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + 
target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +ALTER TABLE public.alert_rule_config_info ALTER COLUMN config_data TYPE text; + +update alert_rule_config_info +set config_data = '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ 
$labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"' +where config_id = 'rules'; + +ALTER TABLE public.alert_config_info ALTER COLUMN config_data TYPE text, ALTER COLUMN config_default TYPE text; + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql new file mode 100644 index 0000000..5c5d3c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql @@ -0,0 +1,8 @@ +-- admin의 owner 속성 추가 +UPDATE cloud_user SET is_tenant_owner = true WHERE user_id = 'admin'; + +-- owner에 대한 종속성을 admin으로 이관기능(필요하면 사용) +UPDATE auth_resource3 SET name = replace(name, 'owner', 'admin') WHERE name like '%|owner|%'; + +-- CLOUD-2305 node_memory_used metric_meta node_memory_SReclaimable_bytes 제거 패치문 반영 +UPDATE metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", 
{filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024' WHERE id = 'node_memory_used'; diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql new file mode 100644 index 0000000..02f01db --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql @@ -0,0 +1,361 @@ +-- agent_install_file_info +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + 
replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when 
total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: 
[__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - 
source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +-- CLOUD-2798 pod_phase_count_by_cluster metric_meta 수정 +UPDATE metric_meta2 SET expr = 'count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))' WHERE id = 'pod_phase_count_by_cluster'; + +-- node_memory_usage 
수정 +update metric_meta2 set expr = 'sum by (xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' where id = 'node_memory_usage'; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql new file mode 100644 index 0000000..7c582c5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql @@ -0,0 +1,360 @@ +-- CLOUD-3473 Memory capacity 조회 쿼리 수정 +update metric_meta2 set description = 'imxc_kubernetes_node_resource_capacity_memory', +expr = 'sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})' where id = 'cluster_memory_capacity'; + +-- module명 metricdata owner_name 와 일치하도록 변경 +update common_setting set code_value ='cmoa-collector' where code_id = 'Cloudmoa Collector'; +update common_setting set code_value ='imxc-api' where code_id = 'Api Server'; +update common_setting set code_value ='imxc-ui' where code_id = 'Ui Server'; +update common_setting set code_value ='cloudmoa-trace-agent' where code_id = 'Trace Agent'; + +-- CLOUD-4795 Contaeird 환경 Container Network 수집 불가 건 확인 +-- 22.10.08 현대카드 대응 건으로 release 3.4.6에 반영 +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up 
when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - 
target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: 
xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: 
job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - 
source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: 
xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config'::text WHERE id = 3::bigint; + diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql new 
file mode 100644 index 0000000..92344db --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql @@ -0,0 +1,102 @@ +-- CLOUD-4752 node_memory_usage alert 관련 쿼리 수정 +update metric_meta2 set +expr = 'sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' +where id = 'node_memory_usage'; + +-- CLOUD-6474 node-exporter | GPMAXPROCS 세팅 +-- Auto-generated SQL script #202211241543 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - 
--collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' + WHERE id=4; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql new file mode 100644 index 0000000..ea66c68 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql @@ -0,0 +1,387 @@ +-- CLOUD-6526 host 관련 쿼리 수정 +-- 수집된 메트릭 시간차로 인해 데이터 표출이 안되는걸 방지하기 위해 rate 5m 추가 +UPDATE metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )' +WHERE id='host_network_io_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )' +WHERE id = 'host_disk_read_write_byte'; + +UPDATE 
public.metric_meta2 SET expr = 'sum by (instance) ( +(rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or +(rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))' +WHERE id = 'host_disk_iops'; + +-- CLOUD-8671 Metric-Agent | 데이터 필터링 설정 추가 +-- Workload > Pod 화면 등에 Docker 런타임 환경의 자원 사용량이 2배 가량으로 보이던 문제 픽스 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: 
''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + 
containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=3; + +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + 
metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=6; diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql 
b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql new file mode 100644 index 0000000..99d1dbe --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql @@ -0,0 +1,2844 @@ +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS warning_sign character VARYING(255); +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS critical_sign character VARYING(255); + +CREATE TABLE IF NOT EXISTS public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +) + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 
12:38:21.587','2020-05-28 12:38:21.587') + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + WHERE public.metric_meta2.id = 'node_contextswitch_and_filedescriptor'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, 
xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 
'container_cpu_system_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, 
owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_reads_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_limit_bytes_by_workload'; + + 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_writes_by_workload', 'Container Filesystem 
Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_writes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} 
CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_max_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_bytes_by_workload'; + + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 
'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_swap_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_working_set_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_cache_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_receive_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, 
owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_transmit_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_pod_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_container_not_running_by_workload'; 
+ + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + WHERE public.metric_meta2.id = 'cotainer_restart_count_by_workload'; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - 
effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: 
cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) + WHERE public.agent_install_file_info.id = 4; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + 
global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') + WHERE public.agent_install_file_info.id = 3; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + 
verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) + WHERE public.agent_install_file_info.id = 2; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... 
should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') + WHERE public.agent_install_file_info.id = 6; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. 
+ ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + 
apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + 
app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. + ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + 
containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) + WHERE public.agent_install_file_info.id = 7; + +--Menu Resource +--Infrastructure +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (SELECT id 
FROM auth_resource3 WHERE name='menu|Infrastructure|Topology'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Resource Usage'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Resource Usage'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Namespace'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Namespace'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Nodes'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES 
(6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Node Details'); + +--Workloads +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Deploy List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Cron Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Pods'); + +--Services +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Structure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Detail'); + +--Statistics & Analysis +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Performance Trends'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Anomaly Score'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Job History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse 
Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Log Viewer'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Event Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Container Life Cycle'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Service Traces'); + +--Reports +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Documents'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (62, 'Templates', NULL, 1, 'reportSettings', (select id from auth_resource3 where name='menu|Reports|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Templates'); + +--Dashboards +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Documents'); + +INSERT INTO public.menu_meta (id, 
description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Templates'); + +--Hosts +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (80, 'Hosts', '12.Hosts', 1, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
+VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Detail'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Group'); + +--Settings +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (90, 'Settings', '08.Setting', 10, NULL, (select id from auth_resource3 where name='menu|Settings'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|User & Group'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', 
(select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Host Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Metric Meta'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|General'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Notification'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (99, 
'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alias'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|License'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent Installation'); + +--Health Check +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) +VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check|Check Script'); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql new file mode 100644 index 0000000..60ad862 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql @@ -0,0 +1,4 @@ +alter table cloud_user alter column log_in_count set default 0; +alter table cloud_user alter column user_lock set default false; + +UPDATE public.metric_meta2 SET meta_name = 'Number of Containers Restart', description = 'Number of Containers Restart (10m)', expr = 'increase(imxc_kubernetes_container_restart_count{{filter}}[10m])', resource_type = 'State', entity_type = 'Workload', groupby_keys = null, in_use = true, anomaly_score = false, message = 'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.', created_date = '2021-06-23 09:30:38.646312', modified_date = '2021-06-23 09:30:38.646312' WHERE id = 'cotainer_restart_count_by_workload'; \ No newline at end of file diff --git 
a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql new file mode 100644 index 0000000..c8deff4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql @@ -0,0 +1,1667 @@ +CREATE TABLE public.tenant_info ( + id character varying(255) NOT NULL, + name character varying(255) NOT NULL, + in_used boolean DEFAULT true, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + delete_scheduler_date timestamp without time zone NULL, + contract_id bigint NOT NULL, + tenant_init_clusters character varying(255) NULL +); +ALTER TABLE ONLY public.tenant_info ADD CONSTRAINT tenant_info_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + cluster_id character varying(255) NOT NULL, + description character varying(255), + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + namespace character varying(255) DEFAULT 'default'::character varying +); + +ALTER TABLE public.alert_group OWNER TO admin; + +ALTER TABLE ONLY public.alert_group + ADD CONSTRAINT alert_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX alert_group_name_uindex ON public.alert_group USING btree (name); + +CREATE TABLE public.alert_target ( + id bigint NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + cluster_id character varying(255) NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + alert_group_id bigint, + namespace character varying(255) +); + +ALTER TABLE public.alert_target OWNER TO admin; + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT alert_target_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_target + 
ADD CONSTRAINT fkjrvj775641ky7s0f82kx3sile FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + + + +CREATE TABLE public.report_template ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + enable boolean NOT NULL, + metric_data text, + template_data text, + title character varying(255) +); + +ALTER TABLE public.report_template OWNER TO admin; + +ALTER TABLE ONLY public.report_template + ADD CONSTRAINT report_template_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_event ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + alert_name character varying(255) NOT NULL, + cluster_id character varying(255) NOT NULL, + data text NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + level character varying(255) NOT NULL, + meta_id character varying(255) NOT NULL, + namespace character varying(255), + starts_at bigint NOT NULL, + threshold character varying(255) NOT NULL, + value character varying(255) NOT NULL, + message character varying(255), + ends_at bigint, + status character varying(20) NOT NULL, + hook_collect_at bigint +); + +ALTER TABLE public.alert_event OWNER TO admin; + +CREATE TABLE public.metric_meta2 ( + id character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + description character varying(255) NOT NULL, + expr text NOT NULL, + resource_type character varying(255), + entity_type character varying(255) NOT NULL, + groupby_keys character varying(255), + in_use boolean DEFAULT false NOT NULL, + anomaly_score boolean DEFAULT false NOT NULL, + message character varying(255) NOT NULL, + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() 
NOT NULL +); + +ALTER TABLE public.metric_meta2 OWNER to admin; + +ALTER TABLE ONLY public.metric_meta2 + ADD CONSTRAINT metric_meta2_pk PRIMARY KEY (id); + +CREATE TABLE public.alert_rule ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + critical float, + name character varying(255), + warning float, + alert_group_id bigint, + alert_rule_meta_id character varying(255) NOT NULL, + alert_target_id bigint, + duration character varying(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + warning_sign character varying(255), + critical_sign character varying(255) +); + +ALTER TABLE public.alert_rule OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT alert_rule_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk6b09d1xfyago6wiiqhdiv03s3 FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk8wkucwkgr48hkfg8cvuptww0f FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fkiqaskea7ts0f872u3nx9ne25u FOREIGN KEY (alert_target_id) REFERENCES public.alert_target(id); + +CREATE TABLE public.alert_rule_meta ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + description text NOT NULL, + expr character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + target character varying(255) NOT NULL, + message character varying(255) +); + +ALTER TABLE public.alert_rule_meta OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule_meta + ADD CONSTRAINT alert_rule_meta_pkey PRIMARY KEY (id); + +CREATE SEQUENCE hibernate_sequence; + +CREATE TABLE public.cloud_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + name character varying(255) NOT 
NULL, + description character varying(255), + created_by character varying(255), + auth_resource_id bigint +); + +ALTER TABLE public.cloud_group OWNER TO admin; + +ALTER TABLE ONLY public.cloud_group + ADD CONSTRAINT cloud_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX cloud_group_name_uindex ON public.cloud_group USING btree (name); + +CREATE TABLE public.cloud_user ( + user_id character varying(255) NOT NULL, + email character varying(255), + is_admin boolean NOT NULL, + phone character varying(255), + user_nm character varying(255) NOT NULL, + user_pw character varying(255) NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + dormancy_date timestamp without time zone NULL, + company character varying(255), + department character varying(255), + last_log_in_date timestamp without time zone, + "position" character varying(255), + use_ldap boolean NOT NULL, + auth_method character varying(255) NOT NULL, + log_in_count integer default 0 NOT NULL, + user_lock boolean default false NOT NULL, + user_lock_date timestamp without time zone, + tenant_id character varying(120), + is_tenant_owner boolean default false, + auth_resource_id bigint, + status character varying(255) default 'use' NOT NULL +); + +ALTER TABLE public.cloud_user OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user ADD CONSTRAINT cloud_user_pkey PRIMARY KEY (user_id); + +ALTER TABLE ONLY public.cloud_user + ADD CONSTRAINT cloud_user_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.menu_meta ( + id bigint NOT NULL, + description character varying(255), + icon character varying(255), + "position" integer NOT NULL, + url character varying(255), + auth_resource3_id bigint NOT NULL, + scope_level int default 0 +); + +ALTER TABLE public.menu_meta OWNER TO admin; + +ALTER TABLE ONLY public.menu_meta + ADD CONSTRAINT menu_meta_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.metric_base ( + 
meta_name character varying(255) NOT NULL, + provider character varying(255) NOT NULL, + description character varying(255) NOT NULL, + resource_type character varying(255), + diag_type character varying(255), + entity_type character varying(255) NOT NULL, + metric_type character varying(255) NOT NULL, + keys character varying(255), + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.metric_base OWNER TO admin; + +ALTER TABLE ONLY public.metric_base + ADD CONSTRAINT metric_base_pk PRIMARY KEY (meta_name); + +CREATE TABLE public.report_static ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + metric_data text, + template_data text, + title character varying(255), + type character varying(255), + report_template_id bigint +); + +ALTER TABLE public.report_static OWNER TO admin; + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT report_static_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT fk7o821ym9a57lrcfipf928cfpe FOREIGN KEY (report_template_id) REFERENCES public.report_template(id); + +CREATE TABLE public.user_group ( + user_group_id bigint NOT NULL, + user_id character varying(255) NOT NULL +); + +ALTER TABLE public.user_group OWNER TO admin; + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT user_group_pkey PRIMARY KEY (user_group_id, user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkooy6rip2craw6jy3geb5wnix6 FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkowo8h9te5nwashab3u30docg FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +CREATE TABLE public.cloud_user_profile ( + user_id character varying(255) NOT NULL, + 
created_date timestamp without time zone, + modified_date timestamp without time zone, + profile_image oid +); + +ALTER TABLE public.cloud_user_profile OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_profile + ADD CONSTRAINT cloud_user_profile_pkey PRIMARY KEY (user_id); + + +CREATE TABLE public.common_setting ( + code_id character varying(255) NOT NULL, + code_value character varying(255), + code_desc character varying(255), + code_auth character varying(255), + code_group character varying(255), + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.common_setting OWNER TO admin; + +ALTER TABLE ONLY public.common_setting + ADD CONSTRAINT common_setting_pkey PRIMARY KEY (code_id); + + + +CREATE TABLE public.dashboard_thumbnail ( + id bigint NOT NULL, + thumbnail_image oid, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.dashboard_thumbnail OWNER TO admin; + +ALTER TABLE ONLY public.dashboard_thumbnail + ADD CONSTRAINT dashboard_thumbnail_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.notification_channel ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone, + modified_by character varying(255), + modified_date timestamp without time zone, + cluster_id character varying(255), + config text, + name character varying(255), + type character varying(255) +); + +ALTER TABLE public.notification_channel OWNER TO admin; + +ALTER TABLE ONLY public.notification_channel + ADD CONSTRAINT notification_channel_pkey PRIMARY KEY (id); + + +CREATE TABLE public.notification_registry ( + id bigint NOT NULL, + alert_rule_id bigint NOT NULL, + notification_channel_id bigint +); + +ALTER TABLE public.notification_registry OWNER TO admin; + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT notification_registry_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.notification_registry + ADD 
CONSTRAINT fk28xo8snm6fd19i3uap0oba0d1 FOREIGN KEY (notification_channel_id) REFERENCES public.notification_channel(id); + + +CREATE TABLE public.license_check_2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_id integer NOT NULL, + real_host_id integer NOT NULL, + imxc_cpu_count integer NOT NULL, + real_cpu_count integer NOT NULL, + target_clusters_count integer NOT NULL, + real_clusters_count integer NOT NULL, + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + features_bitmap integer NOT NULL, + allowable_range integer NOT NULL, + check_time timestamp without time zone NOT NULL, + check_result integer NOT NULL +); + +ALTER TABLE public.license_check_2 + ADD CONSTRAINT license_check_pkey PRIMARY KEY (id); + +CREATE INDEX license_check_check_time_idx ON license_check_2(check_time); + + +CREATE TABLE public.license_violation ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone +); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check_2(id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check_2(id); + +CREATE INDEX license_violation_check_time_idx ON license_violation(check_time); +CREATE INDEX license_violation_resolved_time_idx ON 
license_violation(resolved_time); + + +CREATE TABLE public.license_key ( + id bigint NOT NULL, + license_key text NOT NULL, + set_time timestamp NOT NULL, + in_used bool NULL, + tenant_id varchar NULL, + cluster_id bigint NULL, + CONSTRAINT license_key_pkey PRIMARY KEY (id) +); + +ALTER TABLE public.license_key ADD CONSTRAINT license_key_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.license_check2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_ids character varying(255), + real_host_ids character varying(255), + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + allowable_range integer NOT NULL, + license_cluster_id character varying(255), + check_time timestamp without time zone NOT NULL, + check_result integer NOT null +); + +ALTER TABLE public.license_check2 + ADD CONSTRAINT license_check2_pkey PRIMARY KEY (id); + +CREATE INDEX license_check2_time_idx ON license_check2(check_time); + +CREATE TABLE public.license_violation2 ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone, + cluster_id varchar not null +); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check2(id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_resolved_id_fk FOREIGN KEY 
(resolved_id) REFERENCES public.license_check2(id); + +CREATE INDEX license_violation2_check_time_idx ON license_violation2(check_time); +CREATE INDEX license_violation2_resolved_time_idx ON license_violation2(resolved_time); + +CREATE TABLE public.license_key2 ( + id bigint not null, + license_key text not null, + set_time timestamp without time zone not null, + cluster_id varchar, + license_used bool not null +); + +ALTER TABLE public.license_key2 + ADD CONSTRAINT license_key2_pkey PRIMARY KEY (id); + +create table public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +CREATE TABLE public.auth_resource2 ( + id bigint NOT NULL default nextval('hibernate_sequence'), + access_type integer NOT NULL, + name character varying(255) NOT NULL, + parent_id bigint, + type character varying(255) NOT NULL +); + +ALTER TABLE public.auth_resource2 OWNER TO admin; + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT auth_resource2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT resource_name_uniq UNIQUE (name, type, parent_id); + +--ALTER TABLE ONLY public.auth_resource2 +-- ADD CONSTRAINT auth_resource2_auth_resource_id_fk FOREIGN 
KEY (parent_id) REFERENCES public.auth_resource2(id); +-- +--ALTER TABLE ONLY public.menu_meta +-- ADD CONSTRAINT fk2tqq4ybf6w130fsaejhrsnw5s FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.user_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_id character varying(255) +); + +ALTER TABLE public.user_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.user_permission2 +-- ADD CONSTRAINT user_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_user_id_fk FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + + +CREATE TABLE public.group_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_group_id bigint +); + +ALTER TABLE public.group_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_user_group_id_fk FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +-- ALTER TABLE ONLY public.group_permission2 +-- ADD CONSTRAINT group_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.resource_group2 ( + id int8 NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + "name" varchar(255) NOT NULL, + description varchar(255) NULL, + CONSTRAINT resource_group2_pkey 
PRIMARY KEY (id) +-- CONSTRAINT resource_group2_fk1 FOREIGN KEY (id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_group2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_group2 TO "admin"; + +CREATE TABLE public.resource_member2 ( + resource_group_id int8 NOT NULL, + auth_resource_id int8 NOT NULL, + CONSTRAINT resource_member2_pkey PRIMARY KEY (resource_group_id, auth_resource_id), + CONSTRAINT resource_member2_fkey1 FOREIGN KEY (resource_group_id) REFERENCES resource_group2(id) +-- CONSTRAINT resource_member2_fkey2 FOREIGN KEY (auth_resource_id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_member2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_member2 TO "admin"; + +CREATE TABLE public.dashboard2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + layout text NOT NULL, + title character varying(255) NOT NULL, + auth_resource_id bigint NOT NULL, + created_by character varying(255) NOT NULL, + modified_by character varying(255) NOT NULL, + description character varying(255), + share boolean DEFAULT false +); + +ALTER TABLE public.dashboard2 OWNER TO admin; + +ALTER TABLE ONLY public.dashboard2 + ADD CONSTRAINT dashboard2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.dashboard2 +-- ADD CONSTRAINT dashboard_resource_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.log_management ( + cluster_id varchar NOT NULL, + node_id varchar NOT NULL, + log_rotate_dir varchar, + log_rotate_count integer, + log_rotate_size integer, + log_rotate_management boolean NOT NULL, + back_up_dir varchar, + back_up_period integer, + back_up_dir_size integer, + back_up_management boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +alter table public.log_management add constraint log_management_pkey primary key (cluster_id, node_id); + 
+CREATE TABLE public.sampling_setting ( + service_id bigint NOT NULL, + service_name character varying(255), + sampling_type character varying(255), + sampling_param character varying(255), + cluster varchar, + namespace varchar, + cluster_id bigint +); +ALTER TABLE public.sampling_setting OWNER TO admin; + +ALTER TABLE ONLY public.sampling_setting + ADD CONSTRAINT sampling_setting_pkey PRIMARY KEY (service_id); + +CREATE TABLE public.operation_setting ( + id bigint NOT NULL, + service_id bigint NOT NULL, + sampling_type character varying(255), + sampling_param character varying(255), + operation_name character varying(255) +); + +ALTER TABLE public.operation_setting OWNER TO admin; + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_fkey FOREIGN KEY (service_id) REFERENCES public.sampling_setting(service_id); + +CREATE TABLE public.cluster_setting ( + cluster_id bigint NOT NULL, + param_type character varying(255), + param_value character varying(255), + cluster_name varchar, + name character varying(255) +); + +ALTER TABLE ONLY public.cluster_setting + ADD CONSTRAINT cluster_setting_pkey PRIMARY KEY (cluster_id); + +CREATE TABLE public.alias_code ( + user_id varchar NOT NULL, + id varchar NOT NULL, + name varchar, + type varchar, + use_yn varchar, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.alias_code add constraint alias_code_pkey primary key (user_id, id); + +CREATE TABLE public.sparse_log_info ( + id varchar NOT NULL, + cluster_id varchar, + namespace varchar, + target_type varchar, + target_id varchar, + log_path varchar, + created_date timestamp, + modified_date timestamp, + threshold float4, + PRIMARY KEY ("id") +); + +CREATE TABLE public.view_code ( + user_id varchar NOT NULL, + view_id varchar NOT NULL, + json_data text, + created_date 
timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.view_code add constraint view_code_pkey primary key (user_id, view_id); + +CREATE TABLE public.entity_black_list ( + entity_type varchar not null, + entity_name varchar not null, + cluster_id varchar not null, + namespace varchar, + black_list bool not null, + workload varchar(255) not null +); + +ALTER TABLE public.entity_black_list + ADD CONSTRAINT entity_black_list_pkey PRIMARY KEY (entity_type, entity_name, cluster_id, namespace); + +CREATE TABLE public.script_setting ( + id bigint NOT NULL, + name character varying(255), + agent_list character varying(255), + file_path character varying(255), + args character varying(255), + valid_cmd character varying(255), + valid_val character varying(255), + cron_exp character varying(255), + create_user character varying(255), + mtime BIGINT, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.script_setting + ADD CONSTRAINT script_setting_pkey PRIMARY KEY (id); + +CREATE TABLE public.agent_install_file_info ( + id bigint NOT NULL, + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + description text, + version character varying(255), + yaml text, + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.agent_install_file_info ADD CONSTRAINT agent_install_file_info_pkey PRIMARY KEY (id); + +create table auth_resource3( + id bigint NOT NULL default nextval('hibernate_sequence'), + name character varying(255) NOT NULL, + is_deleted boolean not null, + tenant_id character varying(255) +); + +ALTER TABLE public.auth_resource3 owner to admin; + +ALTER TABLE ONLY public.auth_resource3 + ADD CONSTRAINT auth_resource3_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource3 + ADD CONSTRAINT 
auth_resource3_name_uniq UNIQUE (name); + +create table resource_member3( + resource_group_id bigint not null, + auth_resource3_id bigint not null +); + +ALTER TABLE resource_member3 owner to admin; + +ALTER TABLE ONLY public.resource_member3 + ADD CONSTRAINT resource_member3_pkey primary key (resource_group_id, auth_resource3_id); + +ALTER TABLE ONLY public.auth_resource3 ADD CONSTRAINT auth_resource3_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +ALTER TABLE public.menu_meta ADD CONSTRAINT menu_meta_auth_resource3_fk FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.user_permission2 ADD CONSTRAINT user_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_group2 ADD CONSTRAINT resource_group2_auth_resource3_fk1 FOREIGN KEY (id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey1 FOREIGN KEY (resource_group_id) REFERENCES public.resource_group2(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey2 FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.group_permission2 ADD CONSTRAINT group_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.dashboard2 ADD CONSTRAINT dashboard2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_user ADD CONSTRAINT cloud_user_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_group ADD CONSTRAINT cloud_group_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + 
code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +-- noti server table +CREATE TABLE public.alert_group_v2 ( + id bigint NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + cluster_id varchar(255) NOT NULL, + description varchar(255), + name varchar(255) NOT NULL, + type varchar(255) NOT NULL, + namespace varchar(255) default 'default'::character varying, + destination varchar(255) NOT NULL, + created_by varchar(255) NOT NULL +); + +CREATE TABLE public.alert_target_v2 ( + id bigint NOT NULL, + created_date timestamp, + modified_date timestamp, + cluster_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + alert_group_id bigint, + namespace varchar(255) +); + +CREATE TABLE public.alert_rule_v2 ( + id bigint NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, 
+ critical double precision, + name varchar(255), + warning double precision, + alert_group_id bigint, + alert_rule_meta_id varchar(255) NOT NULL, + alert_target_id bigint, + duration varchar(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + critical_sign varchar(255), + warning_sign varchar(255), + destination varchar(255), + created_by varchar(255) +); + +ALTER TABLE public.alert_group_v2 ADD CONSTRAINT alert_group_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_id_pk PRIMARY KEY (id); + +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_rule_meta_id_fk FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_target_id_fk FOREIGN KEY (alert_target_id) REFERENCES public.alert_target_v2(id); +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT fk4lljw4fnija73tm3lthjg90rx FOREIGN KEY (alert_rule_id) REFERENCES public.alert_rule_v2(id); + + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config +( + id varchar not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait 
varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +-------- 2022-05-31 KubeInfo flatting table -------- +CREATE TABLE cmoa_configmap_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + kind_status varchar(50), + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + binaryData text, + data text, + immutable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +----------------------- +CREATE TABLE cmoa_cronjob_active( + kube_flatting_time bigint, + kind 
varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_active_apiVersion text, + status_active_fieldPath text, + status_active_kind text, + status_active_name text, + status_active_namespace text, + status_active_resourceVersion text, + status_active_uid text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_cronjob_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_failedJobsHistoryLimit text, + spec_schedule text, + spec_successfulJobsHistoryLimit text, + spec_suspend text, + status_lastScheduleTime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_daemonset_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + status_currentNumberScheduled text, + status_desiredNumberScheduled text, + status_numberAvailable text, + status_numberMisscheduled text, + status_numberReady text, + status_numberUnavailable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_deployment_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + 
metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_replicas text, + spec_template_spec_containers_image text, + status_availableReplicas text, + status_readyReplicas text, + status_replicas text, + status_unavailableReplicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_addresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_addresses_ip text, + subset_addresses_hostname text, + subset_addresses_nodeName text, + subset_addresses_targetRef text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_notreadyaddresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_notreadyaddresses_ip text, + subset_notreadyaddresses_hostname text, + subset_notreadyaddresses_nodename text, + subset_notreadyaddresses_targetref text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_ports( + kube_flatting_time bigint, + kind varchar(30), + 
metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_ports_port text, + subset_ports_appprotocol text, + subset_ports_name text, + subset_ports_protocol text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_event_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + action text, + count text, + eventtime text, + firsttimestamp text, + involvedobject_apiversion text, + involvedobject_fieldpath text, + involvedobject_kind text, + involvedobject_name text, + involvedobject_namespace text, + involvedobject_resourceversion text, + involvedobject_uid text, + lasttimestamp text, + message text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + reason text, + related_apiversion text, + related_fieldpath text, + related_kind text, + related_name text, + related_namespace text, + related_resourceversion text, + related_uid text, + series_count text, + series_lastobservedtime text, + series_state text, + source_component text, + source_host text, + type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_resourceversion text, + spec_backofflimit text, + spec_completions 
text, + spec_parallelism text, + status_active text, + status_completiontime text, + status_failed text, + status_starttime text, + status_succeeded text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_template ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_template_spec_containers_args text, + spec_template_spec_containers_command text, + spec_template_spec_containers_image text, + spec_template_spec_containers_name text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_namespace_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + spec_finalizers text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_annotations text, + spec_podcidr text, + spec_taints text, + status_capacity_cpu text, + status_capacity_ephemeral_storage text, + status_capacity_hugepages_1gi text, + status_capacity_hugepages_2mi text, + status_capacity_memory text, + status_capacity_pods text, + status_allocatable_cpu text, + status_allocatable_ephemeral_storage 
text, + status_allocatable_hugepages_1gi text, + status_allocatable_hugepages_2mi text, + status_allocatable_memory text, + status_allocatable_pods text, + status_addresses text, + status_daemonendpoints_kubeletendpoint_port text, + status_nodeinfo_machineid text, + status_nodeinfo_systemuuid text, + status_nodeinfo_bootid text, + status_nodeinfo_kernelversion text, + status_nodeinfo_osimage text, + status_nodeinfo_containerruntimeversion text, + status_nodeinfo_kubeletversion text, + status_nodeinfo_kubeproxyversion text, + status_nodeinfo_operatingsystem text, + status_nodeinfo_architecture text, + status_volumesinuse text, + status_volumesattached text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_condition ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lastheartbeattime text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_image ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_images_names text, + status_images_sizebytes text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolume_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + 
metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_awselasticblockstore text, + spec_azuredisk text, + spec_azurefile text, + spec_capacity text, + spec_claimref_apiversion text, + spec_claimref_fieldpath text, + spec_claimref_kind text, + spec_claimref_name text, + spec_claimref_namespace text, + spec_claimref_resourceversion text, + spec_claimref_uid text, + spec_csi text, + spec_fc text, + spec_flexvolume text, + spec_flocker text, + spec_gcepersistentdisk text, + spec_glusterfs text, + spec_hostpath text, + spec_iscsi text, + spec_local text, + spec_nfs text, + spec_persistentvolumereclaimpolicy text, + spec_photonpersistentdisk text, + spec_portworxvolume text, + spec_quobyte text, + spec_rbd text, + spec_scaleio text, + spec_storageclassname text, + spec_storageos text, + spec_volumemode text, + spec_vspherevolume text, + status_message text, + status_phase text, + status_reason text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolumeclaim_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_storageclassname text, + spec_volumemode text, + spec_volumename text, + status_accessmodes text, + status_capacity text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + kind_status varchar(50), + metadata_uid varchar(40), + row_index int, + metadata_name text, + 
metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_generatename text, + metadata_namespace text, + metadata_deletiontimestamp text, + metadata_deletiongraceperiodseconds text, + metadata_labels text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + spec_hostnetwork text, + spec_priorityclassname text, + spec_enableservicelinks text, + spec_priority text, + spec_schedulername text, + spec_hostpid text, + spec_nodename text, + spec_serviceaccount text, + spec_serviceaccountname text, + spec_dnspolicy text, + spec_terminationgraceperiodseconds text, + spec_restartpolicy text, + spec_securitycontext text, + spec_nodeselector_kubernetes_io_hostname text, + spec_tolerations text, + status_phase text, + status_hostip text, + status_podip text, + status_starttime text, + status_qosclass text, + status_reason text, + status_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_conditions ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + status_conditions_lastprobetime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containerstatuses ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_containerstatuses_name text, + status_containerstatuses_ready text, + status_containerstatuses_restartcount text, + status_containerstatuses_image text, + 
status_containerstatuses_imageid text, + status_containerstatuses_containerid text, + status_containerstatuses_state_terminated_exitcode text, + status_containerstatuses_state_terminated_reason text, + status_containerstatuses_state_terminated_startedat text, + status_containerstatuses_state_terminated_finishedat text, + status_containerstatuses_state_terminated_containerid text, + status_containerstatuses_state_waiting_reason text, + status_containerstatuses_state_waiting_message text, + status_containerstatuses_state_running_startedat text, + status_containerstatuses_laststate_terminated_exitcode text, + status_containerstatuses_laststate_terminated_reason text, + status_containerstatuses_laststate_terminated_startedat text, + status_containerstatuses_laststate_terminated_finishedat text, + status_containerstatuses_laststate_terminated_containerid text, + status_containerstatuses_laststate_waiting_reason text, + status_containerstatuses_laststate_waiting_message text, + status_containerstatuses_laststate_running_startedat text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containers ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_containers_name text, + spec_containers_image text, + spec_containers_env text, + spec_containers_resources_limits_cpu text, + spec_containers_resources_limits_memory text, + spec_containers_resources_requests_cpu text, + spec_containers_resources_requests_memory text, + spec_containers_volumemounts text, + spec_containers_securitycontext_privileged text, + spec_containers_command text, + spec_containers_ports text, + spec_containers_args text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_volume ( + kube_flatting_time bigint, + kind 
varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_volumes_name text, + spec_volumes_hostpath text, + spec_volumes_secret text, + spec_volumes_configmap text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_replicaset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_availablereplicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_hard text, + spec_scopes text, + status_hard text, + status_used text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_scopeselector ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_scopeselector_matchexpressions_operator text, + spec_scopeselector_matchexpressions_scopename text, + spec_scopeselector_matchexpressions_values text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); 
+---------------------------- +CREATE TABLE cmoa_service_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_deletiongraceperiodseconds text, + metadata_deletiontimestamp text, + metadata_labels text, + metadata_namespace text, + spec_clusterip text, + spec_externalips text, + spec_selector text, + spec_type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_ports ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_ports_appprotocol text, + spec_ports_name text, + spec_ports_nodeport text, + spec_ports_port text, + spec_ports_protocol text, + spec_ports_targetport text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_statefulset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); + +CREATE TABLE public.api_error_history ( + id int8 NOT NULL, + api_msg varchar(255) NULL, + code varchar(255) NULL, + "exception" varchar(255) NULL, + 
http_error varchar(255) NULL, + http_status int4 NULL, + occureence_time varchar(255) NULL, + params varchar(255) NULL, + "path" varchar(255) NULL, + "type" varchar(255) NULL, + CONSTRAINT api_error_history_pkey PRIMARY KEY (id) +); + +CREATE TABLE public.metric_score ( + clst_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + metric_id varchar(255) NOT NULL, + sub_key varchar(255) NOT NULL, + unixtime int4 NOT NULL, + anomaly bool NOT NULL, + cont_name varchar(255) NULL, + "instance" varchar(255) NULL, + "namespace" varchar(255) NULL, + node_id varchar(255) NULL, + pod_id varchar(255) NULL, + score int4 NOT NULL, + yhat_lower_upper json NULL, + CONSTRAINT metric_score_pkey PRIMARY KEY (clst_id, entity_id, entity_type, metric_id, sub_key, unixtime) +); + + +CREATE TABLE public.tenant_info_auth_resources ( + tenant_info_id varchar(255) NOT NULL, + auth_resources_id int8 NOT NULL, + CONSTRAINT tenant_info_auth_resources_pkey PRIMARY KEY (tenant_info_id, auth_resources_id), + CONSTRAINT uk_7s6l8e2c8gli4js43c4xoifcl UNIQUE (auth_resources_id) +); + + +-- public.tenant_info_auth_resources foreign keys + +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkkecsc13ydhwg8u05aumkqbnx1 FOREIGN KEY (tenant_info_id) REFERENCES public.tenant_info(id); +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkpvvec4ju3hsma6s1rtgvr4mf6 FOREIGN KEY (auth_resources_id) REFERENCES public.auth_resource3(id); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql new file mode 100644 index 0000000..e6335f3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql @@ -0,0 +1,2380 @@ +INSERT INTO public.tenant_info (id, name, in_used, created_date, modified_date, contract_id) VALUES ('DEFAULT_TENANT', 'admin', true, 
now(), now(), 0); + +INSERT INTO public.auth_resource2 (id, access_type, name, parent_id, type) VALUES (-1, 4, 'null', NULL, 'null'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Infrastructure', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Workloads', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Services', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Diagnosis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Statistics & Analysis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Reports', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Settings', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Hosts', -1, 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Dashboards', -1 , 'menu'); +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Health Check', -1, 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Namespace', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Nodes', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO 
public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Node Details', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Usage', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Persistent Volume', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Pods', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Cron Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Deploy List', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Structure', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO 
public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Troubleshooting', (select id from auth_resource2 where type='menu' and name='Diagnosis') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Performance Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Job History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Log Viewer', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Event Logs', (select id from auth_resource2 where 
type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert Analysis', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Container Life Cycle', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Traces', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Used Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'User & Group', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alerts', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'General', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 
'Metric Meta', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Notification', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Host Alerts', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'License', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Agent', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alias', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and 
name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Group', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'CloudMOA - Nodes Resource', NULL, 'dashboard'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Detail', NULL, 'dashboard'); + +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES(4, 'Check Script', (select id from auth_resource2 where type='menu' and name='Health Check'), 'menu'); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards', false, null); +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, 
tenant_id) VALUES ('menu|Infrastructure|Namespace', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Nodes', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Node Details', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Resource Usage', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Pods', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Jobs', false, null); +-- NSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Cron Jobs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Deploy List', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Structure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis|Anomaly Score', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Performance Trends', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, 
tenant_id) VALUES ('menu|Statistics & Analysis|Alert History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Anomaly Score', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Job History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Log Viewer', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Event Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Container Life Cycle', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Service Traces', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|User & Group', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|General', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Metric Meta', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES 
('menu|Settings|Notification', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Host Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|License', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alias', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent Installation', false, NULL); + + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Group', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check|Check Script', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('userGroup|admin|default', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin|owner', false, 'DEFAULT_TENANT'); + +INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, 
last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('admin', NULL, true, NULL, 'admin', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin')); +INSERT INTO public.cloud_group (id, created_date, modified_date, name, description) VALUES ((select id from auth_resource3 where name='userGroup|admin|default'), now(), now(), 'default', '기본그룹정의'); + +--INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('owner', NULL, false, NULL, 'owner', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin|owner')); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +--INSERT INTO public.cloud_user_setting +--(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +--VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|CloudMOA - Nodes Resource', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|Service Detail', false, null); + +INSERT INTO public.auth_resource3 (name, 
is_deleted, tenant_id) VALUES ('cluster|cloudmoa', false, 'DEFAULT_TENANT'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (select id from auth_resource3 where name='menu|Infrastructure'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (select id from auth_resource3 where name='menu|Infrastructure|Topology'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (select id from auth_resource3 where name='menu|Infrastructure|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (select id from auth_resource3 where name='menu|Infrastructure|Resource Usage'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (select id from auth_resource3 where name='menu|Infrastructure|Namespace'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3); +INSERT INTO public.menu_meta (id, description, icon, 
"position", url, auth_resource3_id, scope_level) VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where 
name='menu|Services|Structure'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (30, 'Diagnosis', '05.Diagnosis', 4, NULL, (select id from auth_resource3 where name='menu|Diagnosis'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (31, 'Anomaly Score Detail', NULL, 0, 'anomalyScoreDiagnosis', (select id from auth_resource3 where name='menu|Diagnosis|Anomaly Score'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2); +INSERT INTO public.menu_meta 
(id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (62, 'Templates', NULL, 1, 'templateReport', (select id from auth_resource3 where name='menu|Reports|Templates'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2); + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (80, 'Hosts', '12.Hosts', 10, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where 
name='menu|Hosts|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (90, 'Settings', '08.Setting', 99, NULL, (select id from auth_resource3 where name='menu|Settings'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', (select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from 
auth_resource3 where name='menu|Settings|Metric Meta'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (99, 'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2); + +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0); + +--INSERT INTO public.user_permission2 VALUES 
(nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Resource Usage'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Namespace'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Nodes'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Node Details'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Deploy List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Jobs'), 
'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Cron Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Pods'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Structure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Detail'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis|Anomaly Score'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & 
Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Performance Trends'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Anomaly Score'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Job History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Log Viewer'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Event Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Container Life Cycle'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & 
Analysis|Service Traces'), 'owner'); +-- +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Templates'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Templates'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|User & Group'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alerts'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 
WHERE NAME = 'menu|Settings|Metric Meta'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|General'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Notification'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alias'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|License'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent Installation'), 'owner'); + +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cadvisor_version_info', 'cadvisor', 'A metric with a constant ''1'' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_periods_total', 'cadvisor', 'Number of elapsed enforcement period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_periods_total', 'cadvisor', 'Number of throttled period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_seconds_total', 'cadvisor', 'Total time duration the container has been throttled.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_load_average_10s', 'cadvisor', 'Value of container cpu load average over the last 10 seconds.', 'CPU', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_periods_total', 'cadvisor', 'Number of times processes of the cgroup have run on the cpu', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_seconds_total', 'cadvisor', 'Time duration the processes of the container have run on the CPU.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('container_cpu_schedstat_runqueue_seconds_total', 'cadvisor', 'Time duration processes of the container have been waiting on a runqueue.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_system_seconds_total', 'cadvisor', 'Cumulative system cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_usage_seconds_total', 'cadvisor', 'Cumulative cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_user_seconds_total', 'cadvisor', 'Cumulative user cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_limit_bytes', 'cadvisor', 'Number of bytes that can be consumed by the container on this filesystem.', NULL, NULL, 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_last_seen', 'cadvisor', 'Last time a container was seen by the 
exporter', NULL, NULL, 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_bytes_total', 'cadvisor', 'Cumulative count of bytes received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while receiving', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_total', 'cadvisor', 'Cumulative count of packets received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_bytes_total', 'cadvisor', 'Cumulative count of bytes transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_errors_total', 'cadvisor', 'Cumulative count of errors encountered while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_total', 'cadvisor', 'Cumulative count of packets transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_scrape_error', 'cadvisor', '1 if there was an error while getting container metrics, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_period', 'cadvisor', 'CPU period of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_quota', 'cadvisor', 'CPU quota of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_memory_cache', 'cadvisor', 'Number of bytes of page cache memory.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failcnt', 'cadvisor', 'Number of memory usage hits limits', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failures_total', 'cadvisor', 'Cumulative count of memory allocation failures.', 'Memory', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_max_usage_bytes', 'cadvisor', 'Maximum memory usage recorded in bytes', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_rss', 'cadvisor', 'Size of RSS in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_swap', 'cadvisor', 'Container swap usage in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_usage_bytes', 'cadvisor', 'Current memory usage in bytes, including all memory regardless of when it was accessed', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_working_set_bytes', 'cadvisor', 'Current working set in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_tcp_usage_total', 'cadvisor', 'tcp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'tcp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_udp_usage_total', 'cadvisor', 'udp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'udp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_shares', 'cadvisor', 'CPU share of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_limit_bytes', 'cadvisor', 'Memory limit 
for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_swap_limit_bytes', 'cadvisor', 'Memory swap limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_start_time_seconds', 'cadvisor', 'Start time of the container since unix epoch in seconds.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_tasks_state', 'cadvisor', 'Number of tasks in given state', NULL, NULL, 'Container', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds', 'prometheus', 'The HTTP request latencies in microseconds.', NULL, 'DURATION', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('http_request_duration_microseconds_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_requests_total', 'prometheus', 'Total number of scrapes by HTTP status code.', NULL, 'ERROR', 'Node', 'counter', 'code,method', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_bytes_average', 'cloudwatch', 'Bytes read from all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('http_response_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds', 'micrometer', 'Server Response in second', NULL, 'RATE', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_count', 'micrometer', 'the total number of requests.', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_sum', 'micrometer', 'the total time taken to serve the requests', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_max', 'micrometer', 'the max number of requests.', NULL, 'RATE', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_bytes_average', 'cloudwatch', 'Bytes written to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_loaded', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_unloaded_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_live_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_max_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_allocated_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_promoted_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_count', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_max', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_sum', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_arp_entries', 'node_exporter', 'ARP entries by device', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_boot_time_seconds', 'node_exporter', 'Node boot time, in unixtime.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_context_switches_total', 'node_exporter', 'Total number of context switches.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_core_throttles_total', 'node_exporter', 'Number of times this cpu core has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'core', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_hertz', 'node_exporter', 'Current cpu thread frequency in hertz.', 'CPU', 'LOAD', 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_max_hertz', 'node_exporter', 'Maximum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_min_hertz', 'node_exporter', 'Minimum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_guest_seconds_total', 'node_exporter', 'Seconds the cpus spent in guests (VMs) for each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_package_throttles_total', 'node_exporter', 'Number of times this cpu package has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'package', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_seconds_total', 'node_exporter', 'Seconds the cpus spent in each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu,mode', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_entropy_available_bits', 'node_exporter', 'Bits of available entropy.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_exporter_build_info', 'node_exporter', 'A metric with a constant ''1'' value labeled by version, revision, branch, and goversion from which node_exporter was built.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_cpuutilization_average', 'cloudwatch', 'The percentage of allocated EC2 compute units that are currently in use on the instance.', 'CPU', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_ops_average', 'cloudwatch', 'Completed read operations from all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_ops_average', 'cloudwatch', 'Completed write operations to all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_bytes_average', 'cloudwatch', 'Bytes read from all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_bytes_average', 'cloudwatch', 'Bytes written to all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_in_average', 'cloudwatch', 'The number of bytes received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_out_average', 'cloudwatch', 'The number of bytes sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_readonly', 'node_exporter', 'Filesystem read-only status.', NULL, NULL, 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_in_average', 'cloudwatch', 'The number of packets received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_forks_total', 'node_exporter', 'Total number of forks.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_chip_names', 'node_exporter', 'Annotation metric for human-readable chip names', 'CPU', 'LOAD', 'Node', 'gauge', 'chip', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_fan_rpm', 
'node_exporter', 'Hardware monitor for fan revolutions per minute (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_pwm', 'node_exporter', 'Hardware monitor pwm element ', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_sensor_label', 'node_exporter', 'Label for given chip and sensor', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_celsius', 'node_exporter', 'Hardware monitor for temperature (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_alarm_celsius', 'node_exporter', 'Hardware monitor for temperature (crit_alarm)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_celsius', 'node_exporter', 'Hardware monitor for temperature (crit)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_max_celsius', 'node_exporter', 'Hardware monitor for temperature (max)', NULL, NULL, 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_intr_total', 'node_exporter', 'Total number of interrupts serviced.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_out_average', 'cloudwatch', 'The number of packets sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_ops_average', 'cloudwatch', 'Completed read operations from all Amazon EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_ops_average', 'cloudwatch', 'Completed write operations to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load1', 'node_exporter', '1m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load15', 'node_exporter', '15m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load5', 'node_exporter', '5m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_completed_total', 'node_exporter', 'The total number of reads completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_merged_total', 'node_exporter', 'The total number of reads merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_write_time_seconds_total', 'node_exporter', 'This is the total number of seconds spent by all writes.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_completed_total', 'node_exporter', 'The total number of writes completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_merged_total', 'node_exporter', 'The number of writes merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_written_bytes_total', 'node_exporter', 'The total number of bytes written successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries', 'node_exporter', 'Number of currently allocated flow entries for connection tracking.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries_limit', 'node_exporter', 'Maximum size of connection tracking table.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_scrape_collector_duration_seconds', 'node_exporter', 'node_exporter: Duration of a collector scrape.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_success', 'node_exporter', 'node_exporter: Whether a collector succeeded.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_textfile_scrape_error', 'node_exporter', '1 if there was an error opening or reading a file, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_time_seconds', 'node_exporter', 'System time in seconds since epoch (1970).', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_estimated_error_seconds', 'node_exporter', 'Estimated error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_frequency_adjustment_ratio', 'node_exporter', 'Local clock frequency adjustment.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_loop_time_constant', 'node_exporter', 'Phase-locked loop time constant.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_maxerror_seconds', 'node_exporter', 'Maximum error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_offset_seconds', 'node_exporter', 'Time offset in between local system and reference clock.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_calibration_total', 'node_exporter', 'Pulse per second count of calibration intervals.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_error_total', 'node_exporter', 'Pulse per second count of calibration errors.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_frequency_hertz', 'node_exporter', 'Pulse per second frequency.', 
NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_seconds', 'node_exporter', 'Pulse per second jitter.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_total', 'node_exporter', 'Pulse per second count of jitter limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_shift_seconds', 'node_exporter', 'Pulse per second interval duration.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_exceeded_total', 'node_exporter', 'Pulse per second count of stability limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_hertz', 'node_exporter', 'Pulse per second stability, average of recent frequency changes.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_status', 'node_exporter', 'Value of the status array bits.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_sync_status', 'node_exporter', 'Is clock synchronized to a reliable server (1 = yes, 0 = no).', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tai_offset_seconds', 'node_exporter', 'International Atomic Time (TAI) offset.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tick_seconds', 'node_exporter', 'Seconds between clock ticks.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_uname_info', 'node_exporter', 'Labeled system information as provided by the uname system call.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_oom_kill', 'node_exporter', '/proc/vmstat information field oom_kill.', NULL, 'ERROR', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_cpu_usage', 'micrometer', 'The "recent cpu usage" for the Java Virtual Machine process', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_uptime_seconds', 'micrometer', 'Process uptime in seconds.', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_count', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_max', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_sum', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_cpu_usage', 'micrometer', 'The "recent cpu usage" for the whole system', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_load_average_1m', 'micrometer', 'The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('up', 'prometheus', '1 if the instance is healthy, i.e. 
reachable, or 0 if the scrape failed.', NULL, 'ERROR', 'Any', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('go_threads', 'prometheus', 'Number of OS threads created.', 'Thread', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes', 'prometheus', 'The HTTP request sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes', 'prometheus', 'The HTTP response sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_count', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_total_capacity_bytes', 
'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_committed_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_max_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_daemon', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_live', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_peak', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_now', 'node_exporter', 'The number of I/Os currently in progress.', 'Disk', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_seconds_total', 'node_exporter', 'Total seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_weighted_seconds_total', 'node_exporter', 'The weighted # of seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_bytes_total', 'node_exporter', 'The total number of bytes read successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_time_seconds_total', 'node_exporter', 'The total number of seconds spent by all reads.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_filefd_allocated', 'node_exporter', 'File descriptor statistics: allocated.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_maximum', 'node_exporter', 'File descriptor statistics: maximum.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_connections_total', 'node_exporter', 'The total number of connections made.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_bytes_total', 'node_exporter', 'The total amount of incoming data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_packets_total', 'node_exporter', 'The total number of incoming packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_bytes_total', 'node_exporter', 'The total amount of outgoing data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_packets_total', 'node_exporter', 'The total number of outgoing packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_anon_bytes', 'node_exporter', 'Memory information field Active_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_bytes', 'node_exporter', 'Memory information field Active_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_file_bytes', 'node_exporter', 'Memory information field Active_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonHugePages_bytes', 'node_exporter', 'Memory information field AnonHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonPages_bytes', 'node_exporter', 'Memory information 
field AnonPages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Bounce_bytes', 'node_exporter', 'Memory information field Bounce_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Buffers_bytes', 'node_exporter', 'Memory information field Buffers_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Cached_bytes', 'node_exporter', 'Memory information field Cached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaFree_bytes', 'node_exporter', 'Memory information field CmaFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaTotal_bytes', 'node_exporter', 'Memory information field CmaTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_memory_CommitLimit_bytes', 'node_exporter', 'Memory information field CommitLimit_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Committed_AS_bytes', 'node_exporter', 'Memory information field Committed_AS_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap1G_bytes', 'node_exporter', 'Memory information field DirectMap1G_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap2M_bytes', 'node_exporter', 'Memory information field DirectMap2M_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap4k_bytes', 'node_exporter', 'Memory information field DirectMap4k_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Dirty_bytes', 'node_exporter', 'Memory information field Dirty_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, 
'2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HardwareCorrupted_bytes', 'node_exporter', 'Memory information field HardwareCorrupted_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Free', 'node_exporter', 'Memory information field HugePages_Free.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Rsvd', 'node_exporter', 'Memory information field HugePages_Rsvd.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Surp', 'node_exporter', 'Memory information field HugePages_Surp.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Total', 'node_exporter', 'Memory information field HugePages_Total.', 'Memory', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_memory_Hugepagesize_bytes', 'node_exporter', 'Memory information field Hugepagesize_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_anon_bytes', 'node_exporter', 'Memory information field Inactive_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_bytes', 'node_exporter', 'Memory information field Inactive_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_file_bytes', 'node_exporter', 'Memory information field Inactive_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_KernelStack_bytes', 'node_exporter', 'Memory information field KernelStack_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mapped_bytes', 'node_exporter', 'Memory information field Mapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemAvailable_bytes', 'node_exporter', 'Memory information field MemAvailable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemFree_bytes', 'node_exporter', 'Memory information field MemFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemTotal_bytes', 'node_exporter', 'Memory information field MemTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mlocked_bytes', 'node_exporter', 'Memory information field Mlocked_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_NFS_Unstable_bytes', 'node_exporter', 'Memory information field NFS_Unstable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_memory_PageTables_bytes', 'node_exporter', 'Memory information field PageTables_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Shmem_bytes', 'node_exporter', 'Memory information field Shmem_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemHugePages_bytes', 'node_exporter', 'Memory information field ShmemHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemPmdMapped_bytes', 'node_exporter', 'Memory information field ShmemPmdMapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Slab_bytes', 'node_exporter', 'Memory information field Slab_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SReclaimable_bytes', 'node_exporter', 'Memory information field SReclaimable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SUnreclaim_bytes', 'node_exporter', 'Memory information field SUnreclaim_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapCached_bytes', 'node_exporter', 'Memory information field SwapCached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapFree_bytes', 'node_exporter', 'Memory information field SwapFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapTotal_bytes', 'node_exporter', 'Memory information field SwapTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Unevictable_bytes', 'node_exporter', 'Memory information field Unevictable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocChunk_bytes', 'node_exporter', 
'Memory information field VmallocChunk_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocTotal_bytes', 'node_exporter', 'Memory information field VmallocTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocUsed_bytes', 'node_exporter', 'Memory information field VmallocUsed_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Writeback_bytes', 'node_exporter', 'Memory information field Writeback_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_WritebackTmp_bytes', 'node_exporter', 'Memory information field WritebackTmp_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InErrors', 'node_exporter', 'Statistic IcmpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InMsgs', 'node_exporter', 'Statistic IcmpInMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_OutMsgs', 'node_exporter', 'Statistic IcmpOutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InErrors', 'node_exporter', 'Statistic Icmp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InMsgs', 'node_exporter', 'Statistic Icmp6InMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_OutMsgs', 'node_exporter', 'Statistic Icmp6OutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip_Forwarding', 'node_exporter', 'Statistic IpForwarding.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_InOctets', 'node_exporter', 'Statistic Ip6InOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_OutOctets', 'node_exporter', 'Statistic Ip6OutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_InOctets', 'node_exporter', 'Statistic IpExtInOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_OutOctets', 'node_exporter', 'Statistic IpExtOutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_ActiveOpens', 'node_exporter', 'Statistic TcpActiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_CurrEstab', 'node_exporter', 'Statistic TcpCurrEstab.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_InErrs', 'node_exporter', 'Statistic TcpInErrs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_PassiveOpens', 'node_exporter', 'Statistic TcpPassiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_RetransSegs', 'node_exporter', 'Statistic TcpRetransSegs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenDrops', 'node_exporter', 'Statistic TcpExtListenDrops.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenOverflows', 'node_exporter', 'Statistic TcpExtListenOverflows.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesFailed', 
'node_exporter', 'Statistic TcpExtSyncookiesFailed.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesRecv', 'node_exporter', 'Statistic TcpExtSyncookiesRecv.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesSent', 'node_exporter', 'Statistic TcpExtSyncookiesSent.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InDatagrams', 'node_exporter', 'Statistic UdpInDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InErrors', 'node_exporter', 'Statistic UdpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_NoPorts', 'node_exporter', 'Statistic UdpNoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_OutDatagrams', 'node_exporter', 'Statistic UdpOutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InDatagrams', 'node_exporter', 'Statistic Udp6InDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InErrors', 'node_exporter', 'Statistic Udp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_NoPorts', 'node_exporter', 'Statistic Udp6NoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_OutDatagrams', 'node_exporter', 'Statistic Udp6OutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite_InErrors', 'node_exporter', 'Statistic UdpLiteInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite6_InErrors', 'node_exporter', 'Statistic UdpLite6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_bytes_total', 'node_exporter', 'Network device statistic receive_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_compressed_total', 'node_exporter', 'Network device statistic receive_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_drop_total', 'node_exporter', 'Network device statistic receive_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_errs_total', 'node_exporter', 'Network device statistic receive_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_fifo_total', 
'node_exporter', 'Network device statistic receive_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_frame_total', 'node_exporter', 'Network device statistic receive_frame.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_multicast_total', 'node_exporter', 'Network device statistic receive_multicast.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_packets_total', 'node_exporter', 'Network device statistic receive_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_bytes_total', 'node_exporter', 'Network device statistic transmit_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_carrier_total', 'node_exporter', 'Network device statistic transmit_carrier.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_colls_total', 'node_exporter', 'Network device statistic transmit_colls.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_compressed_total', 'node_exporter', 'Network device statistic transmit_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_drop_total', 'node_exporter', 'Network device statistic transmit_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_errs_total', 'node_exporter', 'Network device statistic transmit_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_fifo_total', 'node_exporter', 'Network device statistic transmit_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('node_network_transmit_packets_total', 'node_exporter', 'Network device statistic transmit_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_blocked', 'node_exporter', 'Number of processes blocked waiting for I/O to complete.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_running', 'node_exporter', 'Number of processes in runnable state.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_inuse', 'node_exporter', 'Number of FRAG sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_memory', 'node_exporter', 'Number of FRAG sockets in state memory.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_RAW_inuse', 'node_exporter', 'Number of RAW sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_sockets_used', 'node_exporter', 'Number of sockets sockets in state used.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_alloc', 'node_exporter', 'Number of TCP sockets in state alloc.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_inuse', 'node_exporter', 'Number of TCP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem', 'node_exporter', 'Number of TCP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem_bytes', 'node_exporter', 'Number of TCP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_orphan', 'node_exporter', 
'Number of TCP sockets in state orphan.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_tw', 'node_exporter', 'Number of TCP sockets in state tw.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_inuse', 'node_exporter', 'Number of UDP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem', 'node_exporter', 'Number of UDP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem_bytes', 'node_exporter', 'Number of UDP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDPLITE_inuse', 'node_exporter', 'Number of UDPLITE sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_tcp_connection_states', 'node_exporter', 'Number of connection states.', 'Network', 'LOAD', 'Node', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgfault', 'node_exporter', '/proc/vmstat information field pgfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgmajfault', 'node_exporter', '/proc/vmstat information field pgmajfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgin', 'node_exporter', '/proc/vmstat information field pgpgin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgout', 'node_exporter', '/proc/vmstat information field pgpgout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpin', 'node_exporter', '/proc/vmstat information field pswpin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpout', 'node_exporter', '/proc/vmstat information field pswpout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_files_open', 'micrometer', 'The open file descriptor count', 'File', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_open_fds', 'micrometer', 'Number of open file descriptors.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_resident_memory_bytes', 'micrometer', 'Resident memory size in bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_virtual_memory_bytes', 'micrometer', '-', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_free', 'cadvisor', 'Number of available Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_total', 'cadvisor', 'Number of Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_current', 'cadvisor', 'Number of I/Os currently in progress', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_seconds_total', 'cadvisor', 'Cumulative count of seconds spent doing I/Os', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_weighted_seconds_total', 'cadvisor', 'Cumulative weighted I/O time in seconds', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_read_seconds_total', 'cadvisor', 'Cumulative count of seconds spent reading', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('container_fs_reads_bytes_total', 'cadvisor', 'Cumulative count of bytes read', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_merged_total', 'cadvisor', 'Cumulative count of reads merged', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_total', 'cadvisor', 'Cumulative count of reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_reads_total', 'cadvisor', 'Cumulative count of sector reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_writes_total', 'cadvisor', 'Cumulative count of sector writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_usage_bytes', 'cadvisor', 'Number of bytes that are consumed by the container on this filesystem.', 'Filesystem', 'LOAD', 'Container', 'gauge', 
'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_write_seconds_total', 'cadvisor', 'Cumulative count of seconds spent writing', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_bytes_total', 'cadvisor', 'Cumulative count of bytes written', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_total', 'cadvisor', 'Cumulative count of writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_avail_bytes', 'node_exporter', 'Filesystem space available to non-root users in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_device_error', 'node_exporter', 'Whether an error occurred while getting statistics for the given device.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files', 'node_exporter', 'Filesystem total file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files_free', 'node_exporter', 'Filesystem total free file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_free_bytes', 'node_exporter', 'Filesystem free space in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_size_bytes', 'node_exporter', 'Filesystem size in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hitrate', 'cassandra_exporter', 'All time cache hit rate', 'Cache', 'LOAD', 'Cassandra', 'gauge', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hits_count', 'cassandra_exporter', 'Total number of cache hits', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', 
'2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_requests_count', 'cassandra_exporter', 'Total number of cache requests', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_client_connectednativeclients', 'cassandra_exporter', 'Number of clients connected to this nodes native protocol server', 'Connection', 'LOAD', 'Cassandra', 'gauge', NULL, '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_failures_count', 'cassandra_exporter', 'Number of transaction failures encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_latency_seconds_count', 'cassandra_exporter', 'Number of client requests latency seconds', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_timeouts_count', 'cassandra_exporter', 'Number of timeouts encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_unavailables_count', 'cassandra_exporter', 'Number of unavailable exceptions encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_completedtasks', 'cassandra_exporter', 'Total number of commit log messages written', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_totalcommitlogsize', 'cassandra_exporter', 'Current size, in bytes, used by all the commit log segments', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds', 'cassandra_exporter', 'Local range scan latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds_count', 'cassandra_exporter', 'Local range scan count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('cassandra_keyspace_readlatency_seconds', 'cassandra_exporter', 'Local read latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds_count', 'cassandra_exporter', 'Local read count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused', 'cassandra_exporter', 'Total disk space used belonging to this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds', 'cassandra_exporter', 'Local write latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds_count', 'cassandra_exporter', 'Local write count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_activetasks', 'cassandra_exporter', 'Number of tasks being actively worked on', 'Task', 'LOAD', 
'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_completedtasks', 'cassandra_exporter', 'Number of tasks completed', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_pendingtasks', 'cassandra_exporter', 'Number of queued tasks queued up', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_totalblockedtasks_count', 'cassandra_exporter', 'Number of tasks that were blocked due to queue saturation', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cloudwatch_requests_total', 'cloudwatch', 'API requests made to CloudWatch', 'API', 'LOAD', 'AWS/Usage', 'counter', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_count', 'imxc_api_server', 'the number of error counts in 5s', NULL, 'ERROR', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_total', 'imxc_api_server', 'the total number of errors', NULL, 'ERROR', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_request_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_count', 'imxc_api_server', 'the number of requests counts in 5s', NULL, 'LOAD', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'gauge', 'protocol', '2019-12-10 11:22:00', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_total', 'imxc_api_server', 'the total number of requests', NULL, 'LOAD', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_connections', 'mongodb_exporter', 'The number of incoming connections from clients to the database server', 'Connection', 'LOAD', 'MongoDB', 'gauge', 'state', '2019-12-04 
16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_client', 'mongodb_exporter', 'The number of the active client connections performing read or write operations', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_current_queue', 'mongodb_exporter', 'The number of operations that are currently queued and waiting for the read or write lock', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_instance_uptime_seconds', 'mongodb_exporter', 'The number of seconds that the current MongoDB process has been active', 'Server', 'DURATION', 'MongoDB', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_memory', 'mongodb_exporter', 'The amount of memory, in mebibyte (MiB), currently used by the database process', 'Memory', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_metrics_document_total', 'mongodb_exporter', 'The total number of documents processed', 'Row', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_network_bytes_total', 'mongodb_exporter', 'The number of bytes that reflects the amount of network traffic', 'Network', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_op_counters_total', 'mongodb_exporter', 'The total number of operations since the mongod instance last started', 'Request', 'LOAD', 'MongoDB', 'counter', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_aborted_connects', 'mysqld_exporter', 'The number of failed attempts to connect to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_received', 'mysqld_exporter', 'The number of bytes received from all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_sent', 'mysqld_exporter', 'The number of bytes sent to all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_commands_total', 
'mysqld_exporter', 'The number of times each XXX command has been executed', 'Request', 'LOAD', 'MySQL', 'counter', 'command', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_connections', 'mysqld_exporter', 'The number of connection attempts (successful or not) to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests', 'mysqld_exporter', 'The number of logical read requests', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests', 'mysqld_exporter', 'The number of writes done to the InnoDB buffer pool', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_read', 'mysqld_exporter', 'The amount of data read since the server was started (in bytes)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_reads', 'mysqld_exporter', 'The total number of data reads (OS file reads)', 'Disk', 'LOAD', 'MySQL', 
'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_writes', 'mysqld_exporter', 'The total number of data writes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_written', 'mysqld_exporter', 'The amount of data written so far, in bytes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_write_requests', 'mysqld_exporter', 'The number of write requests for the InnoDB redo log', 'Log', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_writes', 'mysqld_exporter', 'The number of physical writes to the InnoDB redo log file', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_os_log_written', 'mysqld_exporter', 'The number of bytes written to the InnoDB redo log files', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits', 'mysqld_exporter', 'The number of row locks currently being waited for by operations on InnoDB tables', 'Lock', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_time', 'mysqld_exporter', 'The total time spent in acquiring row locks for InnoDB tables, in milliseconds', 'Lock', 'DURATION', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits', 'mysqld_exporter', 'The number of times operations on InnoDB tables had to wait for a row lock', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_ops_total', 'mysqld_exporter', 'The number of rows operated in InnoDB tables', 'Row', 'LOAD', 'MySQL', 'counter', 'operation', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_table_locks_immediate', 'mysqld_exporter', 'The number of times that a request for a table lock could be granted immediately', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('mysql_global_status_threads_connected', 'mysqld_exporter', 'The number of currently open connections', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_running', 'mysqld_exporter', 'The number of threads that are not sleeping', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_uptime', 'mysqld_exporter', 'The number of seconds that the server has been up', 'Server', 'DURATION', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_up', 'mysqld_exporter', 'Whether the last scrape of metrics from MySQL was able to connect to the server', 'NULL', 'ERROR', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_locks_count', 'postgres_exporter', 'Number of locks', 'Lock', 'LOAD', 'PostgreSQL', 'gauge', 'datname,mode', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_activity_count', 'postgres_exporter', 'number of connections in this state', 'Connection', 'LOAD', 'PostgreSQL', 'gauge', 'datname,state', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_read_time', 'postgres_exporter', 'Time spent reading data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_write_time', 'postgres_exporter', 'Time spent writing data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_hit', 'postgres_exporter', 'Number of times disk blocks were found already in the buffer cache', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_read', 'postgres_exporter', 'Number of disk blocks read in this database', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_bytes', 'postgres_exporter', 'Total amount of data written to temporary files by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_files', 'postgres_exporter', 'Number of temporary files created by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_deleted', 'postgres_exporter', 'Number of rows deleted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_fetched', 'postgres_exporter', 'Number of rows fetched by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_inserted', 'postgres_exporter', 'Number of rows inserted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_returned', 'postgres_exporter', 'Number of rows returned by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('pg_stat_database_tup_updated', 'postgres_exporter', 'Number of rows updated by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_commit', 'postgres_exporter', 'Number of transactions in this database that have been committed', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_rollback', 'postgres_exporter', 'Number of transactions in this database that have been rolled back', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_up', 'postgres_exporter', 'Whether the last scrape of metrics from PostgreSQL was able to connect to the server', 'NULL', 'ERROR', 'PostgreSQL', 'gauge', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816000, '2019-08-19 06:14:22.616', '2019-08-19 06:14:22.616', false, 4, (select id from auth_resource2 where type='menu' and name='Infrastructure' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816001, '2019-08-19 06:14:22.635', '2019-08-19 06:14:22.635', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and 
name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816002, '2019-08-19 06:14:22.638', '2019-08-19 06:14:22.638', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816003, '2019-08-19 06:14:22.64', '2019-08-19 06:14:22.64', false, 4, (select id from auth_resource2 where type='menu' and name='Namespace' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816004, '2019-08-19 06:14:22.643', '2019-08-19 06:14:22.643', false, 4, (select id from auth_resource2 where type='menu' and name='Nodes' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816005, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Node Details' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816006, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Resource Usage' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816009, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Persistent Volume' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816100, 
'2019-08-19 06:14:22.619', '2019-08-19 06:14:22.619', false, 4, (select id from auth_resource2 where type='menu' and name='Workloads' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816105, '2019-08-19 06:14:22.657', '2019-08-19 06:14:22.657', false, 4, (select id from auth_resource2 where type='menu' and name='Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816106, '2019-08-19 06:14:22.66', '2019-08-19 06:14:22.66', false, 4, (select id from auth_resource2 where type='menu' and name='Cron Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816107, '2019-08-19 06:14:22.646', '2019-08-19 06:14:22.646', false, 4, (select id from auth_resource2 where type='menu' and name='Pods' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816200, '2019-08-19 06:14:22.621', '2019-08-19 06:14:22.621', false, 4, (select id from auth_resource2 where type='menu' and name='Services' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816201, '2019-08-19 06:14:22.698', '2019-08-19 06:14:22.698', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816202, '2019-08-19 06:14:22.728', '2019-08-19 06:14:22.728', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where 
type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816203, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816300, '2019-08-19 06:14:22.624', '2019-08-19 06:14:22.624', false, 4, (select id from auth_resource2 where type='menu' and name='Diagnosis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816301, '2019-08-19 06:14:22.705', '2019-08-19 06:14:22.705', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Diagnosis') ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816309, '2019-08-19 06:14:22.668', '2019-08-19 06:14:22.668', false, 4, (select id from auth_resource2 where type='menu' and name='Troubleshooting' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816400, '2019-08-19 06:14:22.627', '2019-08-19 06:14:22.627', false, 4, (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816401, '2019-08-19 06:14:22.671', '2019-08-19 06:14:22.671', false, 4, (select id from auth_resource2 where type='menu' and name='Performance Trends' ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816402, '2019-08-19 06:14:22.731', '2019-08-19 06:14:22.731', false, 4, (select id from auth_resource2 where type='menu' and name='Alert Analysis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816403, '2019-08-19 06:14:22.674', '2019-08-19 06:14:22.674', false, 4, (select id from auth_resource2 where type='menu' and name='Alert History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816404, '2019-08-19 06:14:22.677', '2019-08-19 06:14:22.677', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816405, '2019-08-19 06:14:22.679', '2019-08-19 06:14:22.679', false, 4, (select id from auth_resource2 where type='menu' and name='Job History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816406, '2019-08-19 06:14:22.685', '2019-08-19 06:14:22.685', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816407, '2019-08-19 06:14:22.682', '2019-08-19 06:14:22.682', false, 4, (select id from auth_resource2 where type='menu' and name='Log Viewer' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, 
modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816408, '2019-08-19 06:14:22.725', '2019-08-19 06:14:22.725', false, 4, (select id from auth_resource2 where type='menu' and name='Event Logs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816409, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Container Life Cycle' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816500, '2019-08-19 06:14:22.629', '2019-08-19 06:14:22.629', false, 4, (select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816501, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816502, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816550, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Dashboards' ) , 
'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816551, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816552, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816700, '2019-08-19 06:14:22.632', '2019-08-19 06:14:22.632', false, 4, (select id from auth_resource2 where type='menu' and name='Settings' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816701, '2019-08-19 06:14:22.687', '2019-08-19 06:14:22.687', false, 4, (select id from auth_resource2 where type='menu' and name='User & Group' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816702, '2019-08-19 06:14:22.69', '2019-08-19 06:14:22.69', false, 4, (select id from auth_resource2 where type='menu' and name='Alert' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816703, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Host Alerts' ) , 'admin'); +-- INSERT INTO public.user_permission2 
(id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816704, '2019-08-19 06:14:22.693', '2019-08-19 06:14:22.693', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Settings' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816706, '2019-08-19 06:14:22.717', '2019-08-19 06:14:22.717', false, 4, (select id from auth_resource2 where type='menu' and name='Metric Meta' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816707, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='Notification' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816708, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='General' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816709, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='License' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816800, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Hosts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816801, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from 
auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816802, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816803, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='List' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816804, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816805, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Group' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); + + + + +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (97, '2019-04-02 18:07:31.319', '2019-04-02 18:07:31.319', 'NODE CPU 사용', '(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m])) * 100))', 
'Node CPU Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id }} CPU 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (1, '2019-04-15 02:26:13.826', '2019-04-15 02:26:24.02', 'NODE Disk 사용', '(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', {filter} }))) * 100', 'Node Disk Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Disk 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (119, '2019-04-02 18:08:50.17', '2019-04-02 18:08:50.17', 'NODE Memory 사용', '(1- ((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node''})) * 100', 'Node Memory Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Memory 사용률이 {threshold}%를 초과했습니다. 현재값 : {{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (2, '2019-04-15 05:27:56.544', '2019-04-15 05:27:59.924', 'Container CPU 사용', 'sum (rate (container_cpu_usage_seconds_total{ {filter} }[1m])) by (xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id) * 100', 'Container CPU Usage', 'controller', 'Cluster:{{$labels.xm_clst_id }} POD:{{$labels.xm_pod_id}} CPU 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_user','Container CPU User (%)','Container CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_working_set_bytes','Container Memory Working Set (GiB)','Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_working_set_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_io_seconds','Host io Disk seconds','Host disk io seconds','sum by (instance) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Disk IO Seconds:{{humanize $value}}|{threshold}.','2020-03-23 04:08:37.359','2020-03-23 04:08:37.359'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_write_byte','host disk R/W byte','host disk R/W byte','sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Read/Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2020-03-24 05:21:53.915','2020-03-24 05:24:52.674'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_free','Host Memory Free (GiB)','Memory information field MemFree_bytes','(node_memory_MemAvailable_bytes{{filter}} or (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:18.977','2020-03-23 04:08:18.977'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_sent','Number of Bytes Sent','The number of bytes sent to all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_sent[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Sent:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_namespace','Containe memory sum by namespace','Containe memory sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','memory','Namespace',NULL,false,false,'Container memory sum by namespace','2020-07-03 04:31:10.079','2020-07-03 08:38:17.034'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_count','Node Count','node count','count by(xm_clst_id, xm_namespace,xm_node_id) (up{{filter}})','Node','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} NODE:{{$labels.xm_node_id}} Node Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_restart_count','Container Restart Count','container restart count group by namespace','sum by(xm_clst_id, xm_namespace, pod_name ) (increase(imxc_kubernetes_container_restart_count{{filter}}[10s]))','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container Restart Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_usage','Node CPU Usage (%)','NODE CPU Usage','(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0) * 100)))','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency_device','Node Disk Read Latency per Device (ms)','Node Disk Read Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage_per_device','Node Filesystem Usage per device (%)','NODE Filesystem Usage per Device','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_memory_usage','Node Memory Usage (%)','Node Memory Usage','sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_tablespace_size','Tablespace Size (GiB)','Generic counter metric of tablespaces bytes in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, tablespace, type) (oracledb_tablespace_bytes) / 1073741824','Tablespace','OracleDB','tablespace, type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Tablespace Size:{{humanize $value}}GiB|{threshold}GiB.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_allocated_size','Allocated Memory (MiB)','The total amount of memory that the Redis allocator allocated','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_allocated_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Allocated Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_kubernetes_event_count','Cluster events count','Kubernetes Namespace Events count','sum by (xm_clst_id, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Event Count:{{humanize $value}}|{threshold}.','2019-09-26 05:33:37.000','2020-04-27 05:38:47.804'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_limit','cluster_memory_limit (Gib)','Total container limit size in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Limits:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_total_count','Cluster Pod Total Count','Cluster Pod Total Count','sum by (xm_clst_id) (imxc_kubernetes_controller_counts{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Total Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_free','Host Swap Memory Free','Host Swap Free','node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:24.594','2020-03-23 04:08:24.594'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_context_switch_count','Host Context','Total number of context switches.','sum by (instance) (node_context_switches_total{{filter}})','CPU','Host',NULL,false,false,'None','2020-03-23 04:08:15.000','2020-03-23 04:08:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_used','Host system Filesystem used','Host File system used','sum by (instance) (node_filesystem_size_bytes{{filter}}-node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:30.407','2020-03-23 04:08:30.407'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_io','Node Disk I/O','Total seconds spent doing I/Os','avg by (xm_clst_id, xm_node_id) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:55.992'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage','Container Filesystem Usage (%)','Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_reads','Container Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_namespace','Container cpu sum by namespace','Container cpu sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Namespace',NULL,false,false,'.','2020-05-30 08:30:10.158','2020-06-09 02:00:50.856'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size','Node Filesystem Available Size (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', 
{filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_running_count','Node Pod Running Count','Node Pod Running Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Running Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-06 08:02:40.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_user','Pod CPU User (%)','Pod CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_reads','Pod Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Read Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_max_usage_bytes','Pod Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_max_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Max Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_receive','Pod Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Receive:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hits_count','Total number of cache hits (count/s)','Total number of cache hits','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_hits_count{{filter}}[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Counts per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:24:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_clientrequest_failures_count','Number of transaction failures encountered','Number of transaction failures encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_failures_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Failure Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_connections_and_tasks','Cassandra connections & tasks','cassandra connections & tasks','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "data_type", "Active tasks", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "data_type", "Pending tasks", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "data_type", "Client connections", "", "") )','Connection','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Connections and Tasks:{{humanize $value}}|{threshold}.','2020-01-02 09:11:48.000','2020-02-13 01:24:51.522'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_transmit','Pod Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Transmit:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 
03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_request','cluster_memory_request (Gib)','Total container memory request in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_count','Local read count (count/s)','Local read count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_readlatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_cpu{{filter}})','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Cluster CPU Capacity Cores:{{humanize $value}}|{threshold}.','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_alerts_received_count','Cluster alerts received count','Alert count by cluster','sum by (xm_clst_id, level) (ceil(increase(imxc_alerts_received_count_total{status=''firing'', 
{filter}}[10m])))','Alert','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Alert Received Counts:{{humanize $value}}|{threshold}.','2019-08-23 04:41:49.000','2020-04-28 08:09:09.429'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_throttled_time','Container CPU Throttled Time','container cpu_throttled time','sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) (increase(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="", {filter}}[10s]))','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hitrate','All time cache hit rate','All time cache hit rate','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (cassandra_cache_hitrate {{filter}} * 100)','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-12-13 01:19:54.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_read_bytes','Bytes Read from All Instance Store Volumes (KiB)','Bytes read from all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_read_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_write_bytes','Bytes Written to All Instance Store Volumes (KiB)','Bytes written to all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_write_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebswrite_bytes','Bytes written to all EBS volumes (KiB)','Bytes written to all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebswrite_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_requests_count','Total number of cache requests (count/s)','Total number of cache requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_requests_count[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_keyspace_write_latency','Local write latency (ms)','Local write latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_usage','Cluster Memory Usage (%)','All Nodes Memory Usage in cluster.','(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100','Memory','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-07-18 06:12:22.000','2020-04-22 04:59:14.251'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections_metrics_created_total','Incoming Connections Created','Count of all incoming connections created to the server (This number includes connections that have since closed)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_connections_metrics_created_total[1m]))','Connection','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Incoming Connections Created Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_disk_io','MySQL Disk I/O','MySQL Disk I/O','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_data_read[1m]), "data_type", "read", "", "") or +label_replace(rate(mysql_global_status_innodb_data_written[1m]), "data_type", "written", "", ""))','Disk','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} SVC:{{$labels.xm_service_name}} Mysql Disk IO:{{humanize $value}}|{threshold}.','2019-12-05 08:48:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_capacity_count','Cluster Pod Capacity Count','Cluster Pod Capacity Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Capacity Pod Counts:{{humanize $value}}|{threshold}.','2019-08-27 04:45:52.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_kubernetes_event_count','Namespace events count','Kubernetes Namespace Events count','sum by (xm_clst_id, xm_namespace, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Events:{{humanize $value}}|{threshold}.','2019-09-24 06:42:09.000','2019-09-24 06:42:34.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_capacity_cores','node_cpu_capacity_cores','node_cpu_capacity_cores','imxc_kubernetes_node_resource_capacity_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_allocatable_cores','node_cpu_allocatable_cores','node_cpu_allocatable_cores','imxc_kubernetes_node_resource_allocatable_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_capacity_count','Node Pod Capacity Count','Node Pod Capacity Count','imxc_kubernetes_node_resource_capacity_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Capacity Count of Pods:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_allocatable','node_memory_allocatable (Gib)','imxc_kubernetes_node_resource_allocatable_memory in GiB','imxc_kubernetes_node_resource_allocatable_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_limit','node_memory_limit (Gib)','Total container memory limit for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 
1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_readwritelatency_seconds','Cassandra Read/Write Latency (ms)','Cassandra Read/Write Latency (ms)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) or (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Keyspace Readwritelatency Seconds:{{humanize $value}}ms|{threshold}ms.','2019-10-23 01:46:07.000','2019-11-05 09:03:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_usage','Cluster CPU Usage (%)','All Nodes CPU Usage in cluster.','(100 - (avg by (xm_clst_id)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0)) * 100))','CPU','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-07-18 05:54:39.000','2020-04-22 04:59:14.253'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_received','Number of Bytes Received','The number of bytes received from all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_received[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Received:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 
16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_request','node_memory_request (Gib)','Total container memory request in GiB for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_tasks','Number of tasks','Number of tasks','sum by (task_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "task_type", "active", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "task_type", "pending", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "task_type", "connected", "", "") )','Task','Cassandra','task_type',true,false,'Number of tasks','2019-10-24 01:34:25.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_latency_seconds','Local latency seconds','Local latency seconds','sum by(type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(cassandra_keyspace_readlatency_seconds{quantile=''0.99'', {filter}}, "type", "read", "", "") or +label_replace(cassandra_keyspace_writelatency_seconds{quantile=''0.99'', {filter}}, "type", "write", "", "")) * 1000','Disk','Cassandra',NULL,true,true,'Local latency seconds','2019-10-24 02:14:45.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_concurrency','Wait-Time - Concurrency','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_concurrency[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Concurrency:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_pendingtasks','Number of queued tasks queued up','Number of queued tasks queued up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_pendingtasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Active Task:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_ready_count','Cluster Pod Ready Count','Cluster Pod Ready Count','sum by (xm_clst_id) (imxc_kubernetes_controller_ready{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Ready Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_allocatable_count','Node Pod Allocatable Count','Node Pod Allocatable Count','imxc_kubernetes_node_resource_allocatable_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} 
Allocatable Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_conatiner_count','Container Type Sparselog Count','Container-type sparse log count by xm_clst_id, xm_namespace, xm_node_id, xm_pod_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_namespace, xm_node_id, xm_pod_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Pod",{filter}}[1m])))','SparseLog','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_connected','Number of Open Connections','The number of currently open connections','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_connected)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Open Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebsread_bytes','Bytes read from all EBS volumes (KiB)','Bytes read from all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebsread_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 
17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_cpu_usage','Namespace CPU Usage (%)','CPU Usage by namespace','sum by (xm_clst_id,xm_entity_type,xm_namespace) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'', {filter}}[1m])) * 100','CPU','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 01:06:05.000','2019-08-23 01:06:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_memory_usage','Namespace memory usage (Gib)','Memory usage by namespace in bytes / 1073741824','sum by (xm_clst_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'', {filter}}) / 1073741824','Memory','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 01:21:31.000','2019-08-23 01:21:31.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_free','Node Memory Free (GiB)','Memory information field MemFree_bytes / 1073741824','node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_cached','Node Swap Memory Cached (GiB)','Memory information field SwapCached_bytes / 
1073741824','node_memory_SwapCached_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Cached Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_active_size','Active Memory (MiB)','The total amount of active memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_active_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Active Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_up','MySQL Up Count','Whether the last scrape of metrics from MySQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_up)','Instance','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Up counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_up','Oracle DB Up Count','Whether the Oracle database server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_up)','Instance','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle DB Up Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_process_count','Process Count','Gauge metric with count of processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_process_count)','Process','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Process Count Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_locks_count','Number of Locks','Number of locks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, mode) (pg_locks_count)','Lock','PostgreSQL','datname,mode',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Lock Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_updated','Number of Rows Updated','Number of rows updated by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_updated[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Updated Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_deleted','Number of Rows Deleted','Number of rows deleted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_tup_deleted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Deleted Row counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_files','Number of Temporary Files Created','Number of temporary files created by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_files[1m]))','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load15','Node CPU Load 15m Average','Node CPU 15m load average','node_load15{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 15m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:27:39.000','2019-05-15 08:27:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_throttling','Node CPU Throttling','Number of times this cpu package has been throttled.','increase(node_cpu_package_throttles_total{xm_entity_type=''Node'',{filter}}[1m])','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Throttling Counts:{{humanize $value}}|{threshold}.','2019-05-15 08:29:24.000','2019-05-15 08:29:24.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_usage','Pod CPU Usage (%)','Pod CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_system','Pod CPU System (%)','Pod CPU Usage (System)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage_bytes','Pod Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Used Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_limit_bytes','Pod Filesystem Limit Bytes (GiB)','Number of 
bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Limit Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load5','Node CPU Load 5m Average','Node CPU 5m load average','node_load5{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 5m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:26:07.000','2019-05-15 08:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_client_connectednativeclients','Number of Client Connections','Number of clients connected to this nodes native protocol server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_client_connectednativeclients)','Connection','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-07 11:59:04.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_activetasks','Number of tasks being actively worked on','Number of tasks being actively worked on','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_activetasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize 
$value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cloudwatch_requests_count','API requests made to CloudWatch','API requests made to CloudWatch','sum by (xm_clst_id, namespace, action) (rate(cloudwatch_requests_total{{filter}}[10m]))','Request','AWS/Usage',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.namespace}} CloudWatch API Call Volume:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_out','Bytes Sent Out on All Network Interfaces (KiB)','The number of bytes sent out on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_out_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_in','Bytes Received on All Network Interfaces (KiB)','The number of bytes received on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_in_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_count','Namespace Pod Count','Pod count by namesapce','count (sum (container_last_seen{{filter}}) by (xm_clst_id, xm_namespace, xm_pod_id)) by (xm_clst_id, xm_namespace)','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Pod Counts:{{humanize $value}}|{threshold}.','2019-08-22 16:53:32.000','2019-08-23 01:06:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage','Node Filesystem Usage (%)','NODE Filesystem Usage','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_available','Node Memory Available (GiB)','Memory information field MemAvailable_bytes / 1073741824','node_memory_MemAvailable_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Avail Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_total','Node Memory Total (GiB)','Memory information field MemTotal_bytes 
/ 1073741824','node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive','Node Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:07:46.000','2019-05-31 17:45:22.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit','Node Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:09:05.000','2019-05-31 17:46:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_allocated_count','Cluster Pod Allocated Count','Cluster Pod Allocated Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_allocatable_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Allocated Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_desired_count','Cluster Pod Desired Count','Cluster pod desired count by controller','sum by (xm_clst_id) (imxc_kubernetes_controller_replicas{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Desired Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 02:26:55.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_commands_total','Number of Commands Executed','The number of times each XXX command has been executed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, command) (rate(mysql_global_status_commands_total[1m]) > 0)','Request','MySQL','command',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Commands Executed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-12 08:20:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_running','Number of Threads Running','The number of threads that are not sleeping','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_running)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_dbname_state','Count by dbname and state in pg','count by dbname and state in pg','sum by (xm_clst_id, xm_namespace, 
xm_node_id, instance, state) (pg_stat_activity_count)','Connection','PostgreSQL','state',true,false,'count by dbname and state in pg','2020-01-30 06:10:54.000','2020-01-31 11:33:41.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_alerts_received_count','Namespace alerts received count','Alert count by namespace','sum by (xm_clst_id, xm_namespace, level) (floor(increase(imxc_alerts_received_count_total{status=''firing'', {filter}}[10m])))','Alert','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Alert Count:{{humanize $value}}|{threshold}.','2019-08-23 04:43:29.000','2019-08-23 04:43:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_reads_count_device','Node Disk Reads Count per Device (IOPS)','Node Disk Reads Count per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_reads_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Reads Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency','Node Disk Read Latency (ms)','Node Disk Read Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-05-20 10:59:07.000','2019-05-31 17:46:54.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency_device','Node Disk Write Latency per Device (ms)','Node Disk Write Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes','Node Disk Write Bytes (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size_device','Node Filesystem Available Size per Device (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size_device','Node Filesystem Free Size per Device (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size_device','Node Filesystem Total Size per Device (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_free','Node Swap Memory Free (GiB)','Memory information field SwapFree_bytes / 1073741824','node_memory_SwapFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_total','Node Swap Memory Total (GiB)','Memory information field SwapTotal_bytes / 1073741824','node_memory_SwapTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_up','PostgreSQL Up Count','Whether the last scrape of metrics from PostgreSQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (pg_up)','Instance','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Instance Count:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests','Number of Writes to Buffer Pool','The number of writes done to the InnoDB buffer pool','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_write_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Writes to Buffer Pool Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests','Number of Logical Read Requests','The number of logical 
read requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_read_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Logical Read Requests Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_read','Amount of Data Read','The amount of data read since the server was started (in bytes)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_data_read[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Read Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_os_log_written','Number of Bytes Written to Redo Log','The number of bytes written to the InnoDB redo log files','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_os_log_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Written to Redo Log Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_written','Amount of Data Written','The amount of data written so far, in bytes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(mysql_global_status_innodb_data_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Written Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pod','Container Memory Request/Limits vs Used by Pod','container_memory_sum_by_pod','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,true,false,'Container memory sum by pod (limit, request, used)','2020-07-22 21:44:33.000','2020-07-22 21:44:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_cache_hit_ratio','Buffer Cache Hit Ratio','Buffer Cache Hit Ratio','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ( +(1 - increase(mysql_global_status_innodb_buffer_pool_reads [1h]) / increase(mysql_global_status_innodb_buffer_pool_read_requests [1h])) * 100)','Block','MySQL',NULL,true,false,'.','2019-12-05 07:47:50.000','2019-12-13 01:17:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_cluster','Container CPU Request/Limits vs Used by Cluster','Container cpu sum by cluster (capacity, limit, request, usage)','sum by(xm_clst_id, data_type) ( 
+label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} *0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})*0.001, "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})*0.001, "data_type", "request", "" , "") or +label_replace(sum by(xm_clst_id)(rate(container_cpu_usage_seconds_total{{filter}}[1m])), "data_type", "used", "" , ""))','CPU','Cluster',NULL,true,false,'Container cpu sum by cluster','2020-07-22 17:49:53.000','2020-07-22 17:49:53.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size','Node Filesystem Total Size (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size','Node Filesystem Free Size (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('container_cpu_sum_by_pod','Container CPU Request/Limits vs Used by Pod','Container cpu sum by pod (capacity, limit, request, usage)','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type)( +label_replace (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "", "") or +label_replace (imxc_kubernetes_container_resource_limit_cpu{{filter}}*0.001, "data_type", "limit", "", "") or +label_replace (imxc_kubernetes_container_resource_request_cpu{{filter}}*0.001, "data_type", "request", "", "") +)','CPU','Pod',NULL,true,false,'Container cpu sum by Pod','2020-07-22 21:37:45.000','2020-07-22 21:37:45.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_lockmode','Count_by_lockmode','Count by lockmode','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, mode) (pg_locks_count)','Lock','PostgreSQL','mode',true,false,'Count by lockmode','2020-01-30 07:06:13.000','2020-01-30 07:06:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits','Number of Row Locks ','The number of row locks currently being waited for by operations on InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_innodb_row_lock_current_waits)','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_capacity','cluster_memory_capacity 
(Gib)','imxc_kubernetes_node_resource_capacity_memory','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Capacity:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:46:58.000','2020-05-27 09:05:56.427'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_free','Host system Filesystem free','Host File system free','sum by (instance) (node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Free Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:29.025','2020-03-23 04:08:29.025'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total','Host system Filesystem total','Host File system total','sum by (instance) (node_filesystem_size_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Total Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:27.634','2020-03-23 04:08:27.634'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_used','Host Swap Memory Used','Host Swap Used','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Used Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:26.169','2020-03-23 04:08:26.169'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes_device','Node Disk Read Bytes per Device (KiB)','The total number 
of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes','Node Disk Read Bytes (KiB)','The total number of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_rollback','Number of Transactions Rolled Back','Number of transactions in this database that have been rolled back','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_xact_rollback[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Rollback Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_commit','Number of Transactions Committed','Number of transactions in this database that have been committed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_xact_commit[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Commit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_ops_total','Number of Rows Operated','The number of rows operated in InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, operation) (rate(mysql_global_status_innodb_row_ops_total[1m]))','Row','MySQL','operation',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Rows Operated Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_immediate','Number of Table Lock Immediate','The number of times that a request for a table lock could be granted immediately','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_immediate[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Immediate Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_count','Local range scan count (count/s)','Local range scan count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) 
(rate(cassandra_keyspace_rangelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_waited','Number of Table Lock Waited','The number of times that a request for a table lock could not be granted immediately and a wait was needed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_waited[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Waited Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_time','Time Spent Reading Data File Blocks (ms)','Time spent reading data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blk_read_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_write_time','Time Spent Writing Data File Blocks (ms)','Time spent writing data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_blk_write_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Write Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_read','Number of Disk Blocks Read','Number of disk blocks read in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_read[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_hit','Number of Block Cache Hit','Number of times disk blocks were found already in the buffer cache','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_hit[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Hit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_activity_count','Number of Client Connections','number of connections in this state','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, state) (pg_stat_activity_count{{filter}})','Connection','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Connection Counts:{{humanize 
$value}}|{threshold}.','2019-08-27 15:49:21.000','2019-11-18 04:16:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_fetched','Number of Rows Fetched','Number of rows fetched by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_fetched[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Fetched Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_inserted','Number of Rows Inserted','Number of rows inserted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_inserted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Inserted Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_latency','Local range scan latency (ms)','Local range scan latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_rangelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_size','Size used by commit log segments (KiB/s)','Current size, in bytes, used by all the commit log segments / 1024','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_totalcommitlogsize[1m]){{filter}}) / 1024','Log','Cassandra',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Volume:{{humanize $value}}KiB/s|{threshold}KiB/s.','2019-10-02 10:17:01.000','2019-11-05 08:07:03.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_messages','Number of commit log messages written (count/s)','Total number of commit log messages written','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_completedtasks[1m]))','Log','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Message per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_count','Number of client requests (count/s)','Number of client requests by request type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_latency_seconds_count{{filter}}[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Client Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:04:25.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_active','Node Memory Active (GiB)','Memory information field Active_bytes in GiB','node_memory_Active_bytes{xm_entity_type=''Node'', {filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Active Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_returned','Number of Rows Returned','Number of rows returned by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_returned[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Returned Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_write_count','Local write count (count/s)','Local write count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_writelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_cluster','Container Memory Request/Limits vs Used by Cluster','Container memory sum by cluster','sum by (xm_clst_id, 
data_type)( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity", "" , "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "", "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "", "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Cluster',NULL,true,false,'Container memory sum by cluster','2020-07-22 21:23:15.000','2020-07-22 21:23:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_capacity','node_memory_capacity (Gib)','node memory capacity in GiB','imxc_kubernetes_node_resource_capacity_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:46:58.000','2019-08-23 08:46:58.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_request_cores','cluster_cpu_request_cores','cluster_cpu_request_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_request_cores','node_cpu_request_cores','node_cpu_request_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_limit_cores','cluster_cpu_limit_cores','cluster_cpu_limit_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_limit_cores','node_cpu_limit_cores','node_cpu_limit_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_unavailables_count','Number of unavailable exceptions encountered','Number of unavailable exceptions encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_unavailables_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Unavailable Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_up','Cassandra Up Count','Whether the last scrape of metrics from Cassandra was able to connect to the server','count by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_bufferpool_size{{filter}})','Instance','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Instances:{{humanize 
$value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 17:01:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_up','MongoDB Up Count','The number of seconds that the current MongoDB process has been active','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_instance_uptime_seconds[1m]))','Instance','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Up Count Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_current_queue','Number of Operations Waiting','The number of operations that are currently queued and waiting for the read or write lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_current_queue)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Waiting Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_client','Number of Active Client','The number of the active client connections performing read or write operations','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_client)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Active Client Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_metrics_document_total','Number of Documents Processed','The total number of documents processed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_metrics_document_total[1m]))','Row','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Documents Processed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused','Total disk space used (GiB)','Total disk space used belonging to this keyspace / 1073741824','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_totaldiskspaceused {{filter}}) / 1073741824','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Disk Space:{{humanize $value}}GiB|{threshold}GiB.','2019-10-02 10:17:01.000','2019-11-07 01:14:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_latency','Local read latency (ms)','Local read latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_totalblockedtasks','Number of tasks that were blocked (count/s)','Number of tasks that were blocked due to queue saturation in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_totalblockedtasks_count[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Blocked Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_completedtasks','Number of tasks completed (count/s)','Number of tasks completed in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_completedtasks{{filter}}[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Pending Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-05 08:08:57.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_memory','Amount of Memory, in MebiByte','The amount of memory, in mebibyte (MiB), currently used by the database process','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_memory)','Memory','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Memory:{{humanize $value}}MiB|{threshold}MiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_resource_utilization','Resource Usage','Gauge metric with resource utilization','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) (oracledb_resource_current_utilization)','Resource','OracleDB','resource_name',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Resource Usage:{{humanize $value}}%|{threshold}%.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_timeouts_count','Number of timeouts encountered','Number of timeouts encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_timeouts_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Timeout Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_network_bytes_total','Amount of Network Traffic','The number of bytes that reflects the amount of network traffic','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_network_bytes_total[1m]))','Network','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Network Traffic Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_op_counters_total','Number 
of Operations','The total number of operations since the mongod instance last started','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (rate(mongodb_op_counters_total[1m]))','Request','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits','Number of Waits for Row Locks','The number of times operations on InnoDB tables had to wait for a row lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_row_lock_waits[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Waits for Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_execute_count','Execute Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_execute_count[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Execute Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_commits','User Commits','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(oracledb_activity_user_commits[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_parse_count','Parse Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_parse_count_total[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Parse Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_rollbacks','User Rollbacks','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_user_rollbacks[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Rollback:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_writes','Pod Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 
05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage','Pod Memory Usage (%)','Pod Memory Usage Compared to Limit','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024)','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Utillization:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage_bytes','Pod Memory Used (GiB)','Current memory usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_cache_hit_ratio','Buffer Cache Hit Ratio (%)','Number of Block Cache Hit / (Number of Block Cache Hit & Blocks Reads) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (increase(pg_stat_database_blks_hit[1h]) / (increase(pg_stat_database_blks_read[1h]) + increase(pg_stat_database_blks_hit[1h])) * 100)','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL 
Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-08-27 15:49:21.000','2019-12-13 01:33:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_other','Wait-Time - Other','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_other[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Other:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_configuration','Wait-Time - Configuration','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_configuration[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Configuration:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_commit','Wait-Time - Commit','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_commit[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_scheduler','Wait-Time - Scheduler','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_scheduler[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Scheduler:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_system_io','Wait-Time - System I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_system_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - System I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_user_io','Wait-Time - User I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_user_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - User I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_network','Wait-Time - Network','Generic counter metric from 
v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_network[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Network:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_blocked_clients','Blocked Clients','Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_blocked_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Blocked Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connected_clients','Connected Clients','Number of client connections (excluding connections from replicas)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_connected_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Connected Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connections_received','Received Connections','Total number of connections accepted by the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_connections_received_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Received Connections:{{humanize 
$value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_rejected_connections','Rejected Connections','Number of connections rejected because of maxclients limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_rejected_connections_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Rejected Connections:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_up','Redis Up Count','Whether the Redis server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_up)','Instance','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Up Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_total','Call Count / Command','Total number of calls per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_total[1m]))','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Call Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_processed','Processed Commands','Total number of commands processed by the server','sum by (xm_clst_id, 
xm_namespace, xm_node_id, instance) (rate(redis_commands_processed_total[1m]))','Request','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Processed Commands:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_key_hit_raito','Redis key hit raito','redis key hit raito','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_keyspace_hits_total [1m]), "data_type", "hits", "" , "") or +label_replace(rate(redis_keyspace_misses_total [1m]), "data_type", "misses", "" , "") )','Keyspace','Redis','data_type',true,false,'redis key hit raito','2020-01-29 02:28:03.000','2020-02-13 00:46:27.568'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_net_byte_total','Network byte','Network byte','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_net_input_bytes_total [1m]), "data_type", "input", "", "") or +label_replace(rate(redis_net_output_bytes_total [1m]), "data_type", "output", "", ""))','Network','PostgreSQL','data_type',true,false,'Network byte','2020-01-30 07:22:12.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_cache','Pod Memory Cache (GiB)','Number of bytes of page cache memory / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_cache{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Cache Memory:{{humanize
$value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_swap','Pod Memory Swap (GiB)','Pod swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_swap{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_total','Oracledb wait time total','oracledb wait time total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_wait_time_scheduler[1m]), "data_type", "scheduler", "", "") or +label_replace(rate(oracledb_wait_time_commit[1m]), "data_type", "commit", "", "") or +label_replace(rate(oracledb_wait_time_network[1m]), "data_type", "network", "", "") or +label_replace(rate(oracledb_wait_time_concurrency[1m]), "data_type", "concurrency", "", "") or +label_replace(rate(oracledb_wait_time_configuration[1m]), "data_type", "configuration", "", "") or +label_replace(rate(oracledb_wait_time_user_io[1m]), "data_type", "user_io", "", "") or +label_replace(rate(oracledb_wait_time_system_io[1m]), "data_type", "system_io", "", "") or +label_replace(rate(oracledb_wait_time_other[1m]), "data_type", "other", "", ""))','Wait','OracleDB','data_type',true,false,'oracledb wait time total','2020-01-29 11:03:20.000','2020-02-13 01:08:01.629'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES
('oracledb_activity_count','Oracledb activity count','oracledb activity count','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_execute_count [1m]), "data_type", "excutecount", "", "") or +label_replace(rate(oracledb_activity_parse_count_total[1m]), "data_type", "parse_count", "", "") )','Request','OracleDB','data_type',true,false,'oracledb activity count','2020-01-29 10:40:58.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_transaction','Oracledb transaction','oracledb transaction','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_user_rollbacks[1m]), "data_type", "rollbacks", "", "") or +label_replace(rate(oracledb_activity_user_commits[1m]), "data_type", "commits", "", ""))','Request','OracleDB','data_type',true,false,'oracledb transaction','2020-01-29 11:20:47.000','2020-02-13 01:26:28.558'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_cpu_usage','Redis cpu usage','redis cpu usage','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_used_cpu_sys [1m]), "data_type", "system", "", "") or +label_replace(rate(redis_used_cpu_user [1m]), "data_type", "user", "", "") )','CPU','Redis','data_type',true,false,'redis cpu usage','2020-01-29 01:56:58.000','2020-02-12 04:47:21.228'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_total_load','host total load','host total load','sum by (instance, data_type) ( +label_replace(node_load1 {{filter}}, "data_type", "load 1", "", "") or +label_replace(node_load5 
{{filter}}, "data_type", "load 5", "", "") or +label_replace(node_load15 {{filter}}, "data_type", "load15", "", "") )','CPU','Host',NULL,false,false,'host total load','2020-04-01 08:10:26.588','2020-04-03 01:23:47.665'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys_children','System CPU Used Background','System CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_hits','Keyspace Hits','Number of successful lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_hits_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Hits:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_misses','Keyspace Misses','Number of failed lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_misses_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Misses:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys','DB Keys Count','Total number of keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB Keys Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_expired_keys','Expired Keys','Total number of key expiration events','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_expired_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Expired Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_evicted_keys','Evicted Keys','Number of evicted keys due to maxmemory limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_evicted_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Evicted Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys_expiring','DB Keys Count Expiring','Total number of expiring keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys_expiring)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB 
Keys Count Expiring:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_duration_seconds','Duration Seconds / Command','Total duration seconds per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_duration_seconds_total[1m]) * 1000)','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Duration Seconds:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-29 01:42:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_total','Redis memory total','redis memory total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(redis_allocator_active_bytes / 1048576, "data_type", "active", "" , "") or +label_replace(redis_memory_used_bytes / 1048576, "data_type", "used", "" , "") or +label_replace(redis_allocator_allocated_bytes / 1048576, "data_type", "allocated", "" , "") or +label_replace(redis_allocator_resident_bytes / 1048576, "data_type", "resident", "" , "") )','Memory','Redis','data_type',true,false,'redis memory total','2020-01-29 02:08:28.000','2020-02-13 00:45:28.475'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('count_by_connection_type','Count by connection type','count by connection type','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_connections_received_total [1m]), "data_type", "received connections", "", "") or +label_replace(rate(redis_rejected_connections_total [1m]), "data_type", "rejected 
connections", "", "") or +label_replace(redis_connected_clients, "data_type", "connected clients", "", "") or +label_replace(redis_blocked_clients, "data_type", "blocked clients", "", "") )','Connection','Redis','data_type',true,false,'count by connection type','2020-01-29 00:49:09.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_count','Number of row by stat','Number of row by stat','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_tup_deleted[1m]), "data_type", "deleted", "", "") or +label_replace(rate(pg_stat_database_tup_updated[1m]), "data_type", "updated", "", "") or +label_replace(rate(pg_stat_database_tup_inserted[1m]), "data_type", "inserted", "", "") or +label_replace(rate(pg_stat_database_tup_returned[1m]), "data_type", "returned", "", "") or +label_replace(rate(pg_stat_database_tup_fetched[1m]), "data_type", "fetched", "", "") )','Row','PostgreSQL','data_type',true,true,'Number of row by stat','2019-10-28 07:29:26.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_write_time','Read/Write spent time by file blocks','Read/Write spent time by file blocks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_blk_read_time [1m]), "data_type", "read", "", "") or +label_replace(rate(pg_stat_database_blk_write_time [1m]), "data_type", "write", "", ""))','Block','PostgreSQL','data_type',true,false,'Read/Write spent time by file blocks','2019-10-28 10:56:48.000','2020-02-13 01:06:46.680'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_resident_size','Resident Memory (MiB)','The total amount of resident memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_resident_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Resident Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_used_size','Used Memory (MiB)','Total number of bytes allocated by Redis using its allocator','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_memory_used_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Used Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_anormal_count','Number of anormal request','Number of anormal request ','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, anormal_type) +(label_replace(rate(cassandra_clientrequest_unavailables_count[1m]), "anormal_type", "unavailables", "", "") or +label_replace(rate(cassandra_clientrequest_timeouts_count[1m]), "anormal_type", "timeouts", "", "") or +label_replace(rate(cassandra_clientrequest_failures_count[1m]), "anormal_type", "failures", "", ""))','Request','Cassandra','anormal_type',true,false,'Number of anormal request ','2019-10-28 02:09:45.000','2020-02-13 01:16:24.862'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog','Commitlog count and size','Commitlog count and size','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(rate(cassandra_commitlog_completedtasks {{filter}}[1m]), "data_type", "log_count", "", "") or +label_replace(rate(cassandra_commitlog_totalcommitlogsize {{filter}}[1m]) / 1048576, "data_type", "log_size", "", ""))','Log','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-24 10:44:47.000','2020-02-13 01:16:24.864'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_threads_total','Number of Threads','Number of Threads','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_threads_running, "data_type", "active", "", "") or +label_replace(mysql_global_status_threads_connected, "data_type", "connected", "", "") or +label_replace(rate(mysql_global_status_connections [1m]), "data_type", "connection attempts[1m]", "", "") )','Thread','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-05 06:04:21.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_read_write_count','Local read write count','Local read write count','sum by(xm_clst_id, xm_namespace, xm_node_id, instance, type) +(label_replace( rate(cassandra_keyspace_readlatency_seconds_count [1m]), "type", "read", "", "") or +label_replace( 
rate(cassandra_keyspace_writelatency_seconds_count [1m]), "type", "write", "", ""))','Disk','Cassandra','type',true,true,'Local read write count','2019-10-24 05:18:50.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_lock_total','Oracledb lock total','oracledb lock total','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) +(oracledb_resource_current_utilization{resource_name =~''.+_locks''})','Resource','OracleDB','resource_name',true,false,'oracledb lock total','2020-01-29 11:17:01.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_per_sec_by_api','Service HTTP Requests Count by API (per Second)','the number of HTTP requests counts per second by API','(sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value)','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_core_count','Host CPU Core Count','Host_cpu_capacity_cores','count without(cpu, mode) (node_cpu_seconds_total{{filter}})','CPU','Host',NULL,true,false,'None','2020-03-23 
04:08:05.290','2020-03-23 04:08:05.290'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load5','Host CPU Load 5m Average','Host CPU 5m load average','node_load5{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 5m Load Average:{{humanize $value}}%|{threshold}$.','2020-03-23 04:08:11.655','2020-03-23 04:08:11.655'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_cluster','Pod Phase Count by Cluster','pod phase count by cluster','count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))','Cluster','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_network_io_byte','host network io byte','host network io byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )','Network','Host',NULL,false,false,'host network io byte','2020-03-24 05:48:31.359','2020-03-24 05:48:31.359'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_contextswitch_and_filedescriptor','host contextswitch and filedescriptor','host 
contextswitch and filedescriptor','sum by (data_type, instance) ( +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "Context switch", "", "") or +label_replace(node_filefd_allocated {{filter}}, "data_type", "File descriptor", "", "") )','OS','Host',NULL,false,false,'host contextswitch and filedescriptor','2020-03-24 09:05:51.828','2020-03-24 09:08:06.867'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_usage','Host Swap Memory Usage (%)','Host Swap Memory Usage','(node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}) / node_memory_SwapTotal_bytes{{filter}} * 100 +','Memory','Host',NULL,true,false,'None','2020-03-26 06:39:21.333','2020-03-26 06:39:21.333'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_boot_time','Host Boot time','Host Boot time','node_boot_time_seconds{{filter}}','CPU','Host',NULL,true,false,'None','2020-03-26 08:03:46.189','2020-03-26 08:03:46.189'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_latency','Host read Disk latency','Host disk read latency','sum by (instance) (rate(node_disk_reads_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_read_time_seconds_total{{filter}}[1m])/rate(node_disk_reads_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Read Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:34.001','2020-03-23 04:08:34.001'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('host_disk_write_latency','Host write Disk latency','Host disk write latency','sum by (instance) (rate(node_disk_writes_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_write_time_seconds_total{{filter}}[1m])/rate(node_disk_writes_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Write Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:35.823','2020-03-23 04:08:35.823'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_usage','Host Memory Usage (%)','Host Memory Usage ','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Usage:{{humanize $value}}%|{threshold}%.','2020-03-26 06:36:47.931','2020-03-26 06:36:47.931'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_total','Host Memory Total (GiB)','Memory information field MemTotal_bytes','node_memory_MemTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:16.897','2020-03-23 04:08:16.897'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_bytes_received_sent','Bytes Received & Sent in MySQL','Bytes Received & Sent in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( 
+label_replace(rate(mysql_global_status_bytes_received [1m]), "data_type", "received", "", "") or +label_replace(rate(mysql_global_status_bytes_sent [1m]), "data_type", "sent", "", ""))','Network','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}|{threshold}.','2019-12-05 07:58:11.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_95th','Service HTTP 95% Elapsed Time (ms)','the maximum time taken to servce the 95% of HTTP requests','histogram_quantile(0.95, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 95th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_99th','Service HTTP 99% Elapsed Time (ms)','the maximum time taken to servce the 99% of HTTP requests','histogram_quantile(0.99, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 99th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_error_rate','Service Pod HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Pod Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-11-07 07:52:24.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_90th','Service HTTP 90% Elapsed Time (ms)','the maximum time taken to servce the 90% of HTTP requests','histogram_quantile(0.90, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 90th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total_by_mountpoint','host filesystem size by mountpoint','host filesystem size by mountpoint','sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))','Filesystem','Host',NULL,false,false,'host filesystem size by mountpoint','2020-03-30 04:01:45.322','2020-03-30 05:16:32.252'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_timeline_count','Namespace timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id, xm_namespace, level)','Timeline','Namespace',NULL,false,false,'None','2020-04-08 06:21:21.392','2020-04-08 06:21:21.392'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_timeline_count','Cluster timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id,level)','Timeline','Cluster',NULL,false,false,'None','2020-04-08 06:19:32.792','2020-04-28 
08:07:47.786'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_transmit','Cluster Network Transmit','Cluster Network Transmit','sum by (xm_clst_id) (rate(node_network_transmit_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Transmit','2020-04-28 08:10:21.070','2020-04-28 08:29:18.491'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_receive','Cluster Network Receive','Cluster Network Receive','sum by (xm_clst_id) (rate(node_network_receive_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Receive','2020-04-28 08:07:26.294','2020-04-28 08:29:18.486'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_running_count','Namespace Pod Running Count','Running pod count by namespace','count by (xm_clst_id, xm_namespace) (sum by (xm_clst_id, xm_node_id, xm_namespace, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Namespace',NULL,false,false,'None','2020-05-21 01:18:06.016','2020-05-21 01:18:06.016'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_request','Pod CPU Request','Pod CPU Request','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_io_byte','Node Network IO byte','Node Network IO byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", "") )','Network','Node',NULL,false,false,'Node Network IO byte','2020-05-21 07:32:03.535','2020-05-21 07:32:03.535'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_request','pod_memory_request (Gib)','Total container memory request in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_node','Container memory sum by node','Container memory sum by node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity" , "", "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_working_set_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Node',NULL,false,false,'Container memory sum by node','2020-05-28 09:36:44.000','2020-06-09 01:38:10.694'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_context_switches','Node Context Switches','Node Context Switches','rate(node_context_switches_total {{filter}}[1m])','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:05.521'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_write_byte','Node disk read and write bytes','Node disk read and write bytes','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]), "data_type", "Read" , "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]), "data_type", "Write", "" , "") +)','Disk','Node',NULL,false,false,'Node disk read and write bytes','2020-05-28 13:02:44.729','2020-05-28 13:04:35.126'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_total','Host Swap Memory Total','Host Swap Total','node_memory_SwapTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Swap Memory Size:{{humanize 
$value}}GiB|{threshold}GiB.','2020-03-23 04:08:23.130','2020-03-23 04:08:23.130'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_iowait','Host CPU iowait','Host CPU iowait','avg by (instance) (rate(node_cpu_seconds_total{mode=''iowait'',{filter}}[1m])) * 100','CPU','Host',NULL,false,false,'Host:{{$labels.instance}} CPU IO wait:{{humanize $value}}|{threshold}.','2020-03-26 08:03:51.307','2020-03-26 08:03:51.307'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_filefd_allocated','Host statistics Filesystem allocated.','Host File descriptor statistics: allocated.','sum by (instance) (node_filefd_allocated{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem allocated:{{humanize $value}}|{threshold}.','2020-03-23 04:08:31.970','2020-03-23 04:08:31.970'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg','Service HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests','sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) == 0 or +sum (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) +/ sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace)','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Requests Time Avg:{{humanize 
$value}}ms|{threshold}ms.','2019-10-15 09:37:44.000','2020-03-09 06:42:14.172'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate_by_api','Service HTTP Requests Error Rate by API','the number of HTTP error counts by API / the number of HTTP requests counts by API','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) ==0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg_by_api','Service HTTP Average Elapsed Time by API (ms)','the average time taken to serve the HTTP requests by API for a service','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.500'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_used','Node CPU Used (Cores)','Node CPU Used (Cores)','(100 - (avg by (xm_clst_id, xm_node_id) (clamp_max(rate(node_cpu_seconds_total{name="node-exporter", mode="idle", xm_entity_type="Node", {filter}}[1m]),1.0)) * 100)) * sum by(xm_clst_id, xm_node_id)(imxc_kubernetes_node_resource_capacity_cpu{{filter}}) / 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:35.939'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_iowait','Node CPU I/O Wait','Node CPU I/O Wait','avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{name="node-exporter", mode="iowait", xm_entity_type="Node" , {filter}}[1m])) * 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:20.633'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_node','Container cpu sum by Node','Container cpu sum by Node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} * 0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001), "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001), "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Node',NULL,false,false,'Container cpu sum by Node','2020-05-28 08:06:35.736','2020-06-09 01:46:12.446'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops_per_device','Node Disk IOPs per device','Node Disk I/O Operations Per Second (per device)','sum by (xm_clst_id, xm_node_id, device) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node','device',false,false,'None','2020-06-10 05:56:05.311','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops','Node Disk IOPs','Node Disk I/O Operations Per Second','sum by (xm_clst_id, xm_node_id) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-06-10 05:54:01.309','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_iops','Host Disk IOPs','Host Disk IOPs','sum by (instance) ((rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or (rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))','Disk','Node',NULL,false,false,'Host Disk IOPs','2020-06-10 07:26:28.895','2020-06-10 07:26:28.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_limit','Pod CPU Limit','Pod CPU Limit','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_limit','pod_memory_limit (Gib)','Total container memory limit in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage_bytes','Container Memory Used (GiB)','Current memory usage in GiB, this includes all memory regardless of when it was accessed','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_used','Node Memory Used (GIB)','Node Memory Used (GIB)','((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Used:{{humanize $value}}GiB|{threshold}GiB.','2020-05-21 01:18:06.000','2020-06-04 11:11:11.000'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user','User CPU Used','User CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user[1m]))','CPU','Redis',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-05-29 09:37:22.273'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_container','Container cpu sum by container','container cpu sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_request_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{xm_cont_name!=''POD'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_pods','Container cpu sum by pod','Container cpu sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , 
""))','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pods','Container memory sum by pod','Container memory sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_container','Container memory sum by container','Container memory sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_limit_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{xm_cont_name!=''POD'',{filter}}, "data_type", "used", "" , ""))','Memory','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_disk_read_write_byte','Container disk read and write bytes','Container disk read and write bytes','sum by(xm_clst_id, xm_pod_id, xm_cont_name, data_type) 
(label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read", "" , ""))','Disk','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_disk_read_write_byte','Pod disk read and write bytes','Pod disk read and write bytes','sum by(xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read", "" , ""))','Disk','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_io_byte','Container Network IO byte','Container Network IO byte','sum by (xm_clst_id, xm_pod_id, xm_cont_name, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_io_byte','Pod Network IO byte','Pod Network IO byte','sum by (xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", 
"", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load1','Node CPU Load 1m Average','Node CPU 1m load average','node_load1{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 1m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:22:49.000','2019-05-15 08:22:49.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_open_file_descriptor','Node File Descriptor','Node File Descriptor','sum by(xm_clst_id, xm_node_id)(node_filefd_allocated {{filter}})','Filesystem','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} File Descriptor:{{humanize $value}}|{threshold}.','2020-05-21 01:18:06.000','2020-05-29 09:37:51.101'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_node_count','Node Type Sparselog Count','Node-type sparse log count by xm_clst_id, xm_node_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_node_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Node",{filter}}[1m])))','SparseLog','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_cache','Container Memory Cache (GiB)','Number of bytes of page cache memory / 
1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_cache{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load15','Host CPU Load 15m Average','Host CPU 15m load average','node_load15{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 15m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:13.337','2020-03-23 04:08:13.337'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes_device','Node Disk Write Bytes per Device (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency','Node Disk Write Latency (ms)','Node Disk Write Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Latency:{{humanize 
$value}}ms|{threshold}ms.','2019-05-20 11:00:56.000','2019-05-31 17:47:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_writes_count_device','Node Disk Writes Count per Device (IOPS)','Node Disk Writes Counts per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_writes_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Writes Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_throttled_rate','Container CPU Throttled Rate','container throttled rate','sum by(xm_clst_id, xm_cont_id) (rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="",{filter}}[1m]))','Cluster','Container',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_total_count','Node Pod Total Count','Node Pod Total Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_service_http_requests_per_sec','Service HTTP Requests Count (per Second)','the number of HTTP requests counts per second','((sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))/ on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Http Requests/Second:{{humanize $value}}|{threshold}.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_per_sec','Service Pod HTTP Requests Count (per Second)','the number of HTTP requests counts per second for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod Http Requests/Seconds:{{humanize $value}}|{threshold}.','2019-11-07 07:51:11.000','2020-03-09 06:34:19.353'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_max_usage_bytes','Container Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_max_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 
1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_receive','Container Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_50th','Service HTTP 50% Elapsed Time (ms)','the maximum time taken to serve the 50% of HTTP requests','histogram_quantile(0.50, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 50th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_errors_count','Service Error Count','service error 
count','sum by(xm_clst_id, xm_namespace, xm_service_name, statuscode ) (imxc_service_errors_count{statuscode!="200",{filter}}) OR on() vector(0)','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Error Count:{{humanize $value}}|{threshold}.','2020-08-21 16:45:00.000','2020-08-21 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_used','Host Memory Used (GiB)','Memory information field MemUsed_bytes','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:21.399','2020-03-23 04:08:21.399'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_all_state','Workload Count All State','workload total count regardless of pod state','count by(xm_clst_id, controller_kind) (imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_running_pod','Workload Count Running Pod','workload count of Running state pod','sum by(xm_clst_id,controller_kind ) 
(imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit_device','Node Network Transmit per Device(KiB)','Network device statistic transmit_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive_device','Node Network Receive per Device(KiB)','Network device statistic receive_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_time_avg','Service Pod HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests for pod','sum by 
(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod http Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2019-11-07 07:51:46.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_system','Container CPU System (%)','Container CPU Usage (System)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_usage','Container CPU Usage (%)','Container CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-05-15 
01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_namespace','Pod Phase Count by Namespace','pod phase count by cluster, namespace','count by(xm_clst_id, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','Namespace','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} Pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_limit_bytes','Container Filesystem Limit Bytes (GiB)','Number of bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage','Container Memory Usage (%)','Container memory usage compared to limit if limit is non-zero or 1GiB if limit is zero','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'', xm_cont_name!=''POD'', {filter}} / (container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} > 0) * 100) or sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) 
(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024 / 1024 / 1024 * 100)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_swap','Container Memory Swap (GiB)','Container swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_swap{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_transmit','Container Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('controller_pod_count','Controller Pod Count','Controller Pod Count','sum (imxc_kubernetes_controller_counts{{filter}}) by (xm_clst_id, xm_namespace, xm_entity_name, 
xm_entity_type)','Pod','Controller',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Controller Pod Counts:{{humanize $value}}|{threshold}.','2019-10-10 06:39:09.000','2019-10-10 06:39:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load1','Host CPU Load 1m Average','Host CPU 1m load average','node_load1{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 1m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:09.946','2020-03-23 04:08:09.946'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_usage','Host CPU Usage (%)','Host CPU Usage','100 - (avg by (instance)(clamp_max(rate(node_cpu_seconds_total{mode=''idle'',{filter}}[1m]),1.0)) * 100)','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:07.606','2020-03-23 04:08:07.606'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_cpuutilization','The percentage of allocated EC2 compute','The percentage of allocated EC2 compute units that are currently in use on the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_cpuutilization_average{{filter}})','CPU','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections','Number of Incoming Connections','The number of incoming 
connections from clients to the database server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (mongodb_connections{{filter}})','Connection','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Incoming Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-13 02:26:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_buffer_io','Block read / write','mysql buffer I/O summary','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_innodb_buffer_pool_write_requests, "data_type", "write", "", "") or +label_replace(mysql_global_status_innodb_buffer_pool_read_requests, "data_type", "read", "", "") )','Block','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Mysql Buffer IO:{{humanize $value}}|{threshold}.','2019-12-05 07:30:33.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_reads','Number of Reads Directly from Disk','The number of logical reads that InnoDB could not satisfy from the buffer pool, and had to read directly from disk','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_reads[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Reads Directly from Disk Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('mysql_global_status_connections','Number of Connection Attempts','The number of connection attempts (successful or not) to the MySQL server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_connections[1m]))','Connection','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Connection Attempts counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_status_locks','Number of Locks in MySQL','Number of Locks in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_row_lock_current_waits[1m]), "data_type", "rowlocks", "", "") or +label_replace(rate(mysql_global_status_innodb_row_lock_waits[1m]), "data_type", "waits for rowlocks", "", "") or +label_replace(rate(mysql_global_status_table_locks_immediate[1m]), "data_type", "tablelock immediate", "", "") or +label_replace(rate(mysql_global_status_table_locks_waited[1m]), "data_type", "tablelock waited", "", "") )','Lock','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Mysql Status Locks:{{humanize $value}}|{threshold}.','2019-12-05 08:39:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage_bytes','Container Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_writes','Container Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_sessions_value','Session Count','Gauge metric with count of sessions by status and type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, status_type) +(label_join(oracledb_sessions_value, "status_type", "-", "status", "type"))','Session','OracleDB','status_type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Session Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_bytes','Bytes Written to Temporary Files (KiB)','Total amount of data written to temporary files by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_bytes[1m])) / 1024','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File 
Write Size:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys','System CPU Used','System CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user_children','User CPU Used Background','User CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate','Service HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts','sum by(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / sum by +(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) 
(rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_cache_hit_ratio','Buffer Cache Hit Ratio (%)','(Number of Logical Read - Number of Reads Directly from Disk) / (Number of Logical Read) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ((increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) - increase(mysql_global_status_innodb_buffer_pool_reads[1m])) / increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) * 100)','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Buffer Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage','Pod Filesystem Usage (%)','Pod File System Usage: 100 * (Used Bytes / Limit Bytes)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} /((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_pod_cpu_request','Node Pod CPU Request','Node Pod CPU Request','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} Pod CPU Requests:{{humanize $value}}|{threshold}.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_cpu_usage','Node Pod CPU Usage (%)','Node Pod CPU Usage','sum by (xm_clst_id,xm_node_id) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod CPU Usage:{{humanize $value}}%|{threshold}%.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_usage_core','Container CPU Usage (Core)','Container CPU Usage (Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_system_core','Container CPU System (Core)','Container CPU Usage (System)(Core)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_user_core','Container CPU User (Core)','Container CPU Usage 
(User)(Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_service','pod info in service','pod info(state, node) in service','sum by (xm_clst_id, xm_namespace, xm_service_name,xm_node_id,node_status,xm_pod_id,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2020-12-22 16:05:00.000','2020-12-22 16:05:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_state','Service State Count Sum','service state sum by xm_service_name','sum by (xm_service_name,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2021-01-06 17:30:00.000','2021-01-06 17:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_workload_state','Workload State Count Sum','wokload state sum by owner_name','count by (owner_name, pod_state) (imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_workload','Pod info by workload type','pod info(state, node) by workload type (do filter param)','count by (xm_clst_id, xm_namespace, owner_name, xm_node_id, node_status, xm_pod_id, pod_state) 
(imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_up_state','Node State metric','Node State metric for up, down check','imxc_kubernetes_node_ready{{filter}}','State','Node',NULL,true,false,'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Down {threshold}.','2020-02-02 14:30:00.000','2020-02-02 14:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_reads_by_workload', 
'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, 
xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100))', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name,xm_entity_type) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by (xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 
'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by(xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +--Number of Pods not running +INSERT INTO public.metric_meta2 VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()); +--Number of Containers not running +INSERT INTO public.metric_meta2 VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", 
{filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()); +-- Containers Restart count +INSERT INTO public.metric_meta2 VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()); + +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_per_sec','Service Transaction Count (per Second)','Service Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Transaction Count (per Second)','2021-11-15 16:11:19.606','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_elapsed_time_avg','Service Pod Transaction Elapsed Time (avg)','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Average Elapsed Time','2021-11-15 16:09:34.233','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_jspd_txn_error_rate','Service Transaction Error Rate','Service Transaction Error Rate','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2022-02-15 14:33:00.118000','2022-02-15 15:40:17.640000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_per_sec','Service Pod Transaction Count (per sec)','The number of transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-02-15 17:59:39.450000','2022-02-15 17:59:39.450000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_elapsed_time_avg','Service Average Elapsed Time','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))','Request','Service',null,true,true,'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2021-11-15 16:09:34.233000','2021-11-15 16:12:21.335000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_error_count','Service Transaction Error Count','Service Transaction Error Count','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])))','Request','Service',NULL,true,true,'Service Transaction Error Count','2021-11-15 16:10:31.352','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_error_rate','Service Pod Transaction Error Rate','The number of transaction error rate for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.','2022-02-15 18:08:58.180000','2022-02-15 18:08:58.180000'); + +INSERT INTO 
metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_active_txn_per_sec','Service Active Transaction Count (per Second)','Service Active Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:51:45.946','2022-03-11 15:51:45.946'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_active_txn_per_sec','Service Pod Active Transaction Count (per sec)','The number of active transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:53:29.252','2022-03-11 15:53:29.252'); + + +INSERT INTO public.license_key (id, license_key, set_time, in_used, tenant_id) VALUES (nextval('hibernate_sequence'), 
'A46CB0A0870B60DD0EF554F092FB8490C647C4ACCF17177EB0028FEF1B677A1DC86C08219D3D357E55E87B653A9D2F044F9095576ED493CE5D1E180E8843A04BCFE94E500F85491D408CFC7397B82F00063415F4CF8756545B6ED1A38F07F91A7B6D9381B7FC433A5086CDD2D748527ECB42835677199F23F7C8E33A66E8138182DDD76BE4925FA4B1DFD96FD5578FE80C75E0E20D76877BF6FD570265D8E69CAC34795B982CF8D811669894886567E4F5F62E28990953401374B548787E35374BFF201D5C9AD062B326E72F9B1D7791A610DA1BDF1D4F829819BC537E06C8D54F95FB04F2DAC456698F605DE3BBD72E472FC79658C806B188988B053E1E4D96FFFFFF0312983D630FAD5E9160650653074248047030124045265319328119048121312221292096178141356403289033057286071001044254168244430392446457353385472238471183338511051434316333006127241420429465082200161165099271484261287306170426201314452131350327249112310323036187433166345114324280269098441154231174135226128298344425341164290424093450115453299282209144110060155055496368233391148510223372355438125122460232315097083390283180026090507303464176016343147301028053052418046214169100404193398101492126437150008449359062078276386196105011194373118107003376243188284337378334352432479501211364186021040035210237120336302073022394079272002081397132067383497202300181309396185361017436058208454167203412219275329234043427354024133409339470296204490485256467335056F5B2CABD122B376DAEA67944E1CCE6867DF9EB6504C78F817DF9EB6504C78F81BF1E615E6EC6242C9667BD675FC5FA39C6672FE2068E5D1431C6CD04429D07655865E293C1F77ED7A0D33F5556DA6CD3A8EC2774DB04F797CE4A29B0312F75E585D51D7B4DD227EA6BD5278CB9233040E7DD2B30A6D5119959D5B7EAC826D3DA0537EFB5A034A6A1C91A619F4E168F46A455B594C91F058E1E22C7EA2957EED7533D069C335C95B4FA2B53E71A800343EA7F16B05AFBA04635F1FBDE9C81709C27BA075C78FA26311ED3A4A5226EF47FC84C3024999406B47F2098B5983CC3CAF79F92332074B9872E429CBE8EF12D5092628E4D4A39CBDDFCAAB2E382229CF09A5B10243340C1A7A0C5CBC14C704FCE873571524A5B038F1781CD31A4D8E2C48E02E63A2746E668273BE9D63937B88D8C864CE439528EB13BDFAC3E52EE4B8CB75B4ED65A7C97B42E5DAEE3E41D2331B06FFFBA71BECD9B96AEEB969670FC3869CC59050FD6DFA3245719531410402
2250232266247291151DEFAULT_TENANT', now(), true, 'DEFAULT_TENANT'); +insert into public.license_key2 (id, license_key, set_time, cluster_id, license_used) values (nextval('hibernate_sequence'), 'D041F44269EAFF1AF7C37ACAA86B7D9CBED89547431E777B797220CF62FE5D6A27C66BEBEAB8F4C89EA5379009C90CDEBFFAE307B7AEB897DC4D8CEAB61654340BB746B0B46679A9FB4791C777BAEBA176308F6BEB1654CE43D4E80E6D0F80CEC00B1EC30E7DA4BB8D3159133EF98AEB50617107DB77BE94676E0D4AA04ADA3B11A66824DB89A60C52BC1AB92926F10189DBBA6210B31478F48CF87B5D754F1A7C6BED0D1637742179DBF7BE82B3B3357AEA82CFAAD9126E39C4E19BABCB1CBDDB816C86A8F7C476D963265720383B627800775B0C9116D67CE5CB7CFC71D0A8A36623965EBB18A5BE1816FB1FAAAEAC361D2ABBC7344EC0B6C61E0395115B13FFFFFF03DEF34E840F2ED2AC84AC44DF368362366124308470063002498494067338303241077065122260378200508377102354337080160182150254091118451110391059070094162363290186239455351194330333503046082379128006166220287276298120398066372099177432015458270176242025196335311342039022343475412085392206244005184417460227292375103433217376511140361223163316121467443014486278407389237024349111268136424371062035285300509195050441367478101310353464249250399393211468032382017479033204215420319027225173414447170427346074048078201158299332476339297492269181214328291096331271222221199421106169418137405411466364104047152090465446480302462385088114481261428257207129020358100073347153355274495263056109229159157348228275180360410147142130230179450079472482323145202198010119F9BFDDF3C203A7E537AB046811BB7CEA37AB046811BB7CEA37AB046811BB7CEAE012403885A8163C0E3E14D7AD6207B5E8CE91579501D84B09D6682339A4DB462F479FFE1B232AFB3D19E925768AF0AA3E62D9AB6F9CEADDB1CDCA351CAA90996631814A556C47270431A6A40891F756FDDCA7BDD05C62A2932F8E77979E0D43C9F12565B1F4BB4F0520B44CC76BAC23F65330AC5966D22B209F32126132F4848E500A013F4DC32306A9620394D40C94B8EBC2406B68EBE31DAB17EF2DF977731A5C41C11311DC36E1FB8BC2529D1AA20D5D46919472212D781B1D77378872CBD14C2A5B783C7ADF0D2680946C52E56E186A7E971E7EAB2CF09511361DD892B5D4A113E8A2C60E3F7FEFA4100753D
82B7064101002937733CE0285C73130635F0CBBDF6F1160C2917B2DF9B1C391A8E9D7D9F380BF31A77A84017D0DF26B35BED6B2D145A051EB4345DA90241CA997828B8393ACD5C7316594634356CCC3986EFDD7776AC62C65E500ED125097142489479219130046503035CloudMOA', now(), null, true); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); + +INSERT INTO public.report_template(id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) VALUES(nextval('hibernate_sequence'), 'admin', '2020-04-28 09:29:49.466', 'admin', '2020-04-28 09:29:49.466', '0 0 1 ? * * *', true, +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network 
Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s Cluster resource usage is displayed.

1. CPU Usage

${metricItem1587977724113}

2. Memory Usage

${metricItem1588037028605}

3. Network

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod


1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}





', 'cloudmoa Cluster Daily Report'); +INSERT INTO public.report_template (id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) +VALUES(nextval('hibernate_sequence'), 'admin', '2020-01-20 01:17:50.182', 'admin', '2020-04-29 08:01:40.841', '0 0 9 ? * * *', false, +'[{"id":"metricItem1579497906163","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_cpu_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1579497916213","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_memory_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Memory Usage (%)","displayType":"bar","unit":"%","data":""},{"id":"metricItem1579497928963","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_network_receive","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Network Receive (KiB)","displayType":"pie","unit":"%","data":""},{"id":"metricItem1579497947243","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_load5","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Load 5m Average","displayType":"table","unit":"%","data":""}]', +'

1. editor usage

Let''s write the editor.

1.1 Text Decoration

Bold
Itelic
Strike


1.2 Color and blockquote

What''s your color?

Today is the first day of the rest of your life

1.3 List

  • Apple
  • Banana

  1. postgre
  2. cassandra
  3. prometheus

[ TODO List ]
  • Create DB table
  • Charge file name

1.4 Link, Table, Image




Deamonset NameAgeNamespaceLabelsImageCPUMemory
imxc-agent5
day
imxcimxc-agentregistry.openstacklocal:5000/imxc/imxc-agent:latest83.151.68
GiB
kube-flannel-ds-amd643
month
kube-systemflannelnodequay.io/coreos/flannel:v0.11.0-amd641.0790.88
MiB
kube-proxy10
month
kube-systemkube-proxyk8s.gcr.io/kube-proxy:v1.16.01.18117.66
MiB
node-exporter10
month
defaultnode-exporternode-exporterprom/node-exporter4.7697.54
MiB

exem.jpg

1.6 Metric Item

${metricItem1579497906163}
${metricItem1579497916213}
${metricItem1579497928963}
${metricItem1579497947243}



















', 'Editor usage example'); + +INSERT INTO public.report_static(id, created_by, created_date, modified_by, modified_date, cron_exp, metric_data, template_data, title, "type", report_template_id) VALUES(10582051, 'admin', '2020-04-29 08:27:52.545', 'admin', '2020-04-29 08:27:52.545', '0 0 1 ? * * *', +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s cluster resource usage flow is shown.

1. CPU Usage

Abnormally high CPU usage by particular programs can be an indication that there is something wrong with the computer system.

${metricItem1587977724113}

2. Memory Usage

The Memory Usage window displays the amount of memory available on your system, as well as the memory currently in use by all applications, including Windows itself.

${metricItem1588037028605}

3. Network

A network transmit/receive provides basic network utilization data in relation to the available network capacity.

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod

1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}







', +'cloudmoa Cluster Daily Report', 'manual', (select id from report_template where title='cloudmoa Cluster Daily Report')); + +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency 
(ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', (select id from auth_resource2 where name='CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service 
TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', (select id from auth_resource2 where name='Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', 
'[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', +(select id from auth_resource3 where 
name='dashboard|admin|CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', +(select id from auth_resource3 where name='dashboard|admin|Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, 
code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, 
code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'cloudmoa-trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'cmoa-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream 
Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +insert into public.log_management (cluster_id, node_id, log_rotate_dir, log_rotate_count, log_rotate_size, log_rotate_management, back_up_dir, back_up_period, back_up_dir_size, back_up_management, created_date, modified_date) values ('cloudmoa', '', '/var/lib/docker', 3, 100, true, '/home/moa/log', 5, 1000, true, '2020-07-30 13:54:52', null); + +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (5, 'metrics-server', 'agent', 'Metrcis-Server는 Kubernetes의 kubelet에 있는 cAdvisor로부터 Container Metric 데이터를 수집하여 Prometheus에 전달하는 역할을 합니다.', null, '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: 
["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + 
serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1', true, '2021-03-11 13:41:48.000000', '2021-03-11 13:41:56.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. +', null, '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', null, '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: 
+ cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', null, '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - 
extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + 
containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.16', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + 
metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: 
; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (3, 'prometheus', 
'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.15', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: 
__meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + 
restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: 
${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); + + +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"', true); + + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), 
param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the 
option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission 
cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/Chart.yaml new file mode 100644 index 0000000..a5d4032 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 4.0.0 +description: Modified Authentication Module By EXEM CloudMOA +home: https://www.keycloak.org/ +icon: https://www.keycloak.org/resources/images/keycloak_logo_480x108.png +keywords: +- sso +- idm +- openid connect +- saml +- kerberos +- ldap +maintainers: +- email: unguiculus@gmail.com + name: unguiculus +- email: thomas.darimont+github@gmail.com + name: thomasdarimont +name: keycloak +sources: +- https://github.com/codecentric/helm-charts +- https://github.com/jboss-dockerfiles/keycloak +- https://github.com/bitnami/charts/tree/master/bitnami/postgresql +version: 11.0.1 diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/OWNERS b/ansible/01_old/roles/cmoa_install/files/04-keycloak/OWNERS new file mode 100644 index 0000000..8c2ff0d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/OWNERS @@ -0,0 +1,6 @@ +approvers: + - unguiculus + - thomasdarimont +reviewers: + - 
unguiculus + - thomasdarimont diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/README.md b/ansible/01_old/roles/cmoa_install/files/04-keycloak/README.md new file mode 100644 index 0000000..5f8da10 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/README.md @@ -0,0 +1,765 @@ +# Keycloak + +[Keycloak](http://www.keycloak.org/) is an open source identity and access management for modern applications and services. + +## TL;DR; + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Introduction + +This chart bootstraps a [Keycloak](http://www.keycloak.org/) StatefulSet on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +It provisions a fully featured Keycloak installation. +For more information on Keycloak and its capabilities, see its [documentation](http://www.keycloak.org/documentation.html). + +## Prerequisites Details + +The chart has an optional dependency on the [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart. +By default, the PostgreSQL chart requires PV support on underlying infrastructure (may be disabled). + +## Installing the Chart + +To install the chart with the release name `keycloak`: + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Uninstalling the Chart + +To uninstall the `keycloak` deployment: + +```console +$ helm uninstall keycloak +``` + +## Configuration + +The following table lists the configurable parameters of the Keycloak chart and their default values. 
+ +| Parameter | Description | Default | +|---|---|---| +| `fullnameOverride` | Optionally override the fully qualified name | `""` | +| `nameOverride` | Optionally override the name | `""` | +| `replicas` | The number of replicas to create | `1` | +| `image.repository` | The Keycloak image repository | `docker.io/jboss/keycloak` | +| `image.tag` | Overrides the Keycloak image tag whose default is the chart version | `""` | +| `image.pullPolicy` | The Keycloak image pull policy | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets for the Pod | `[]` | +| `hostAliases` | Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files | `[]` | +| `enableServiceLinks` | Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links | `true` | +| `podManagementPolicy` | Pod management policy. One of `Parallel` or `OrderedReady` | `Parallel` | +| `restartPolicy` | Pod restart policy. One of `Always`, `OnFailure`, or `Never` | `Always` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | `""` | +| `serviceAccount.annotations` | Additional annotations for the ServiceAccount | `{}` | +| `serviceAccount.labels` | Additional labels for the ServiceAccount | `{}` | +| `serviceAccount.imagePullSecrets` | Image pull secrets that are attached to the ServiceAccount | `[]` | +| `rbac.create` | Specifies whether RBAC resources are to be created | `false` +| `rbac.rules` | Custom RBAC rules, e. g. for KUBE_PING | `[]` +| `podSecurityContext` | SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. 
This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) | `{"fsGroup":1000}` | +| `securityContext` | SecurityContext for the Keycloak container | `{"runAsNonRoot":true,"runAsUser":1000}` | +| `extraInitContainers` | Additional init containers, e. g. for providing custom themes | `[]` | +| `extraContainers` | Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy | `[]` | +| `lifecycleHooks` | Lifecycle hooks for the Keycloak container | `{}` | +| `terminationGracePeriodSeconds` | Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance | `60` | +| `clusterDomain` | The internal Kubernetes cluster domain | `cluster.local` | +| `command` | Overrides the default entrypoint of the Keycloak container | `[]` | +| `args` | Overrides the default args for the Keycloak container | `[]` | +| `extraEnv` | Additional environment variables for Keycloak | `""` | +| `extraEnvFrom` | Additional environment variables for Keycloak mapped from a Secret or ConfigMap | `""` | +| `priorityClassName` | Pod priority class name | `""` | +| `affinity` | Pod affinity | Hard node and soft zone anti-affinity | +| `nodeSelector` | Node labels for Pod assignment | `{}` | +| `tolerations` | Node taints to tolerate | `[]` | +| `podLabels` | Additional Pod labels | `{}` | +| `podAnnotations` | Additional Pod annotations | `{}` | +| `livenessProbe` | Liveness probe configuration | `{"httpGet":{"path":"/health/live","port":"http"},"initialDelaySeconds":300,"timeoutSeconds":5}` | +| `readinessProbe` | Readiness probe configuration | `{"httpGet":{"path":"/auth/realms/master","port":"http"},"initialDelaySeconds":30,"timeoutSeconds":1}` | +| `resources` | Pod resource requests and limits | `{}` | +| `startupScripts` | Startup scripts to run before Keycloak starts 
up | `{"keycloak.cli":"{{- .Files.Get "scripts/keycloak.cli" \| nindent 2 }}"}` | +| `extraVolumes` | Add additional volumes, e. g. for custom themes | `""` | +| `extraVolumeMounts` | Add additional volumes mounts, e. g. for custom themes | `""` | +| `extraPorts` | Add additional ports, e. g. for admin console or exposing JGroups ports | `[]` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `statefulsetAnnotations` | Annotations for the StatefulSet | `{}` | +| `statefulsetLabels` | Additional labels for the StatefulSet | `{}` | +| `secrets` | Configuration for secrets that should be created | `{}` | +| `service.annotations` | Annotations for headless and HTTP Services | `{}` | +| `service.labels` | Additional labels for headless and HTTP Services | `{}` | +| `service.type` | The Service type | `ClusterIP` | +| `service.loadBalancerIP` | Optional IP for the load balancer. Used for services of type LoadBalancer only | `""` | +| `loadBalancerSourceRanges` | Optional List of allowed source ranges (CIDRs). Used for service of type LoadBalancer only | `[]` | +| `service.httpPort` | The http Service port | `80` | +| `service.httpNodePort` | The HTTP Service node port if type is NodePort | `""` | +| `service.httpsPort` | The HTTPS Service port | `8443` | +| `service.httpsNodePort` | The HTTPS Service node port if type is NodePort | `""` | +| `service.httpManagementPort` | The WildFly management Service port | `8443` | +| `service.httpManagementNodePort` | The WildFly management node port if type is NodePort | `""` | +| `service.extraPorts` | Additional Service ports, e. g. for custom admin console | `[]` | +| `service.sessionAffinity` | sessionAffinity for Service, e. g. 
"ClientIP" | `""` | +| `service.sessionAffinityConfig` | sessionAffinityConfig for Service | `{}` | +| `ingress.enabled` | If `true`, an Ingress is created | `false` | +| `ingress.rules` | List of Ingress Ingress rule | see below | +| `ingress.rules[0].host` | Host for the Ingress rule | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.rules[0].paths` | Paths for the Ingress rule | `[/]` | +| `ingress.servicePort` | The Service port targeted by the Ingress | `http` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Additional Ingress labels | `{}` | +| `ingress.tls` | TLS configuration | see below | +| `ingress.tls[0].hosts` | List of TLS hosts | `[keycloak.example.com]` | +| `ingress.tls[0].secretName` | Name of the TLS secret | `""` | +| `ingress.console.enabled` | If `true`, an Ingress for the console is created | `false` | +| `ingress.console.rules` | List of Ingress Ingress rule for the console | see below | +| `ingress.console.rules[0].host` | Host for the Ingress rule for the console | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.console.rules[0].paths` | Paths for the Ingress rule for the console | `[/auth/admin]` | +| `ingress.console.annotations` | Ingress annotations for the console | `{}` | +| `networkPolicy.enabled` | If true, the ingress network policy is deployed | `false` +| `networkPolicy.extraFrom` | Allows to define allowed external traffic (see Kubernetes doc for network policy `from` format) | `[]` +| `route.enabled` | If `true`, an OpenShift Route is created | `false` | +| `route.path` | Path for the Route | `/` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Additional Route labels | `{}` | +| `route.host` | Host name for the Route | `""` | +| `route.tls.enabled` | If `true`, TLS is enabled for the Route | `true` | +| `route.tls.insecureEdgeTerminationPolicy` | Insecure edge termination policy of the Route. 
Can be `None`, `Redirect`, or `Allow` | `Redirect` | +| `route.tls.termination` | TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` | `edge` | +| `pgchecker.image.repository` | Docker image used to check Postgresql readiness at startup | `docker.io/busybox` | +| `pgchecker.image.tag` | Image tag for the pgchecker image | `1.32` | +| `pgchecker.image.pullPolicy` | Image pull policy for the pgchecker image | `IfNotPresent` | +| `pgchecker.securityContext` | SecurityContext for the pgchecker container | `{"allowPrivilegeEscalation":false,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | +| `pgchecker.resources` | Resource requests and limits for the pgchecker container | `{"limits":{"cpu":"10m","memory":"16Mi"},"requests":{"cpu":"10m","memory":"16Mi"}}` | +| `postgresql.enabled` | If `true`, the Postgresql dependency is enabled | `true` | +| `postgresql.postgresqlUsername` | PostgreSQL User to create | `keycloak` | +| `postgresql.postgresqlPassword` | PostgreSQL Password for the new user | `keycloak` | +| `postgresql.postgresqlDatabase` | PostgreSQL Database to create | `keycloak` | +| `serviceMonitor.enabled` | If `true`, a ServiceMonitor resource for the prometheus-operator is created | `false` | +| `serviceMonitor.namespace` | Optionally sets a target namespace in which to deploy the ServiceMonitor resource | `""` | +| `serviceMonitor.namespaceSelector` | Optionally sets a namespace selector for the ServiceMonitor | `{}` | +| `serviceMonitor.annotations` | Annotations for the ServiceMonitor | `{}` | +| `serviceMonitor.labels` | Additional labels for the ServiceMonitor | `{}` | +| `serviceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `serviceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `serviceMonitor.path` | The path at which metrics are served | `/metrics` | +| `serviceMonitor.port` | The Service port at which metrics are served | `http` | +| `extraServiceMonitor.enabled` | If `true`, 
an additional ServiceMonitor resource for the prometheus-operator is created. Could be used for additional metrics via [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) | `false` | +| `extraServiceMonitor.namespace` | Optionally sets a target namespace in which to deploy the additional ServiceMonitor resource | `""` | +| `extraServiceMonitor.namespaceSelector` | Optionally sets a namespace selector for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.annotations` | Annotations for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.labels` | Additional labels for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `extraServiceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `extraServiceMonitor.path` | The path at which metrics are served | `/metrics` | +| `extraServiceMonitor.port` | The Service port at which metrics are served | `http` | +| `prometheusRule.enabled` | If `true`, a PrometheusRule resource for the prometheus-operator is created | `false` | +| `prometheusRule.annotations` | Annotations for the PrometheusRule | `{}` | +| `prometheusRule.labels` | Additional labels for the PrometheusRule | `{}` | +| `prometheusRule.rules` | List of rules for Prometheus | `[]` | +| `autoscaling.enabled` | Enable creation of a HorizontalPodAutoscaler resource | `false` | +| `autoscaling.labels` | Additional labels for the HorizontalPodAutoscaler resource | `{}` | +| `autoscaling.minReplicas` | The minimum number of Pods when autoscaling is enabled | `3` | +| `autoscaling.maxReplicas` | The maximum number of Pods when autoscaling is enabled | `10` | +| `autoscaling.metrics` | The metrics configuration for the HorizontalPodAutoscaler | `[{"resource":{"name":"cpu","target":{"averageUtilization":80,"type":"Utilization"}},"type":"Resource"}]` | +| `autoscaling.behavior` | The scaling policy configuration for the HorizontalPodAutoscaler | 
`{"scaleDown":{"policies":[{"periodSeconds":300,"type":"Pods","value":1}],"stabilizationWindowSeconds":300}` | +| `test.enabled` | If `true`, test resources are created | `false` | +| `test.image.repository` | The image for the test Pod | `docker.io/unguiculus/docker-python3-phantomjs-selenium` | +| `test.image.tag` | The tag for the test Pod image | `v1` | +| `test.image.pullPolicy` | The image pull policy for the test Pod image | `IfNotPresent` | +| `test.podSecurityContext` | SecurityContext for the entire test Pod | `{"fsGroup":1000}` | +| `test.securityContext` | SecurityContext for the test container | `{"runAsNonRoot":true,"runAsUser":1000}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --set replicas=1 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --values values.yaml +``` + +The chart offers great flexibility. +It can be configured to work with the official Keycloak Docker image but any custom image can be used as well. + +For the offical Docker image, please check it's configuration at https://github.com/keycloak/keycloak-containers/tree/master/server. + +### Usage of the `tpl` Function + +The `tpl` function allows us to pass string values from `values.yaml` through the templating engine. +It is used for the following values: + +* `extraInitContainers` +* `extraContainers` +* `extraEnv` +* `extraEnvFrom` +* `affinity` +* `extraVolumeMounts` +* `extraVolumes` +* `livenessProbe` +* `readinessProbe` + +Additionally, custom labels and annotations can be set on various resources the values of which being passed through `tpl` as well. + +It is important that these values be configured as strings. +Otherwise, installation will fail. 
+See example for Google Cloud Proxy or default affinity configuration in `values.yaml`. + +### JVM Settings + +Keycloak sets the following system properties by default: +`-Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS -Djava.awt.headless=true` + +You can override these by setting the `JAVA_OPTS` environment variable. +Make sure you configure container support. +This allows you to only configure memory using Kubernetes resources and the JVM will automatically adapt. + +```yaml +extraEnv: | + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true +``` + +### Database Setup + +By default, Bitnami's [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart is deployed and used as database. +Please refer to this chart for additional PostgreSQL configuration options. + +#### Using an External Database + +The Keycloak Docker image supports various database types. +Configuration happens in a generic manner. + +##### Using a Secret Managed by the Chart + +The following examples uses a PostgreSQL database with a secret that is managed by the Helm chart. + +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + +extraEnvFrom: | + - secretRef: + name: '{{ include "keycloak.fullname" . }}-db' + +secrets: + db: + stringData: + DB_USER: '{{ .Values.dbUser }}' + DB_PASSWORD: '{{ .Values.dbPassword }}' +``` + +`dbUser` and `dbPassword` are custom values you'd then specify on the commandline using `--set-string`. + +##### Using an Existing Secret + +The following examples uses a PostgreSQL database with a secret. +Username and password are mounted as files. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + - name: DB_USER_FILE + value: /secrets/db-creds/user + - name: DB_PASSWORD_FILE + value: /secrets/db-creds/password + +extraVolumeMounts: | + - name: db-creds + mountPath: /secrets/db-creds + readOnly: true + +extraVolumes: | + - name: db-creds + secret: + secretName: keycloak-db-creds +``` + +### Creating a Keycloak Admin User + +The Keycloak Docker image supports creating an initial admin user. +It must be configured via environment variables: + +* `KEYCLOAK_USER` or `KEYCLOAK_USER_FILE` +* `KEYCLOAK_PASSWORD` or `KEYCLOAK_PASSWORD_FILE` + +Please refer to the section on database configuration for how to configure a secret for this. + +### High Availability and Clustering + +For high availability, Keycloak must be run with multiple replicas (`replicas > 1`). +The chart has a helper template (`keycloak.serviceDnsName`) that creates the DNS name based on the headless service. + +#### DNS_PING Service Discovery + +JGroups discovery via DNS_PING can be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +#### KUBE_PING Service Discovery + +Recent versions of Keycloak include a new Kubernetes native [KUBE_PING](https://github.com/jgroups-extras/jgroups-kubernetes) service discovery protocol. +This requires a little more configuration than DNS_PING but can easily be achieved with the Helm chart. 
+ +As with DNS_PING some environment variables must be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +However, the Keycloak Pods must also get RBAC permissions to `get` and `list` Pods in the namespace which can be configured as follows: + +```yaml +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +``` + +#### Autoscaling + +Due to the caches in Keycloak only replicating to a few nodes (two in the example configuration above) and the limited controls around autoscaling built into Kubernetes, it has historically been problematic to autoscale Keycloak. +However, in Kubernetes 1.18 [additional controls were introduced](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior) which make it possible to scale down in a more controlled manner. + +The example autoscaling configuration in the values file scales from three up to a maximum of ten Pods using CPU utilization as the metric. Scaling up is done as quickly as required but scaling down is done at a maximum rate of one Pod per five minutes. + +Autoscaling can be enabled as follows: + +```yaml +autoscaling: + enabled: true +``` + +KUBE_PING service discovery seems to be the most reliable mechanism to use when enabling autoscaling, due to being faster than DNS_PING at detecting changes in the cluster. 
+ +### Running Keycloak Behind a Reverse Proxy + +When running Keycloak behind a reverse proxy, which is the case when using an ingress controller, +proxy address forwarding must be enabled as follows: + +```yaml +extraEnv: | + - name: PROXY_ADDRESS_FORWARDING + value: "true" +``` + +### Providing a Custom Theme + +One option is certainly to provide a custom Keycloak image that includes the theme. +However, if you prefer to stick with the official Keycloak image, you can use an init container as theme provider. + +Create your own theme and package it up into a Docker image. + +```docker +FROM busybox +COPY mytheme /mytheme +``` + +In combination with an `emptyDir` that is shared with the Keycloak container, configure an init container that runs your theme image and copies the theme over to the right place where Keycloak will pick it up automatically. + +```yaml +extraInitContainers: | + - name: theme-provider + image: myuser/mytheme:1 + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes/mytheme + +extraVolumes: | + - name: theme + emptyDir: {} +``` + +### Setting a Custom Realm + +A realm can be added by creating a secret or configmap for the realm json file and then supplying this into the chart. +It can be mounted using `extraVolumeMounts` and then referenced as environment variable `KEYCLOAK_IMPORT`. 
+First we need to create a Secret from the realm JSON file using `kubectl create secret generic realm-secret --from-file=realm.json` which we need to reference in `values.yaml`: + +```yaml +extraVolumes: | + - name: realm-secret + secret: + secretName: realm-secret + +extraVolumeMounts: | + - name: realm-secret + mountPath: "/realm/" + readOnly: true + +extraEnv: | + - name: KEYCLOAK_IMPORT + value: /realm/realm.json +``` + +Alternatively, the realm file could be added to a custom image. + +After startup the web admin console for the realm should be available on the path /auth/admin/\/console/. + +### Using Google Cloud SQL Proxy + +Depending on your environment you may need a local proxy to connect to the database. +This is, e. g., the case for Google Kubernetes Engine when using Google Cloud SQL. +Create the secret for the credentials as documented [here](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) and configure the proxy as a sidecar. + +Because `extraContainers` is a string that is passed through the `tpl` function, it is possible to create custom values and use them in the string. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +# Custom values for Google Cloud SQL +cloudsql: + project: my-project + region: europe-west1 + instance: my-instance + +extraContainers: | + - name: cloudsql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - /cloud_sql_proxy + args: + - -instances={{ .Values.cloudsql.project }}:{{ .Values.cloudsql.region }}:{{ .Values.cloudsql.instance }}=tcp:5432 + - -credential_file=/secrets/cloudsql/credentials.json + volumeMounts: + - name: cloudsql-creds + mountPath: /secrets/cloudsql + readOnly: true + +extraVolumes: | + - name: cloudsql-creds + secret: + secretName: cloudsql-instance-credentials + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: "127.0.0.1" + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: postgres + - name: DB_USER + value: myuser + - name: DB_PASSWORD + value: mypassword +``` + +### Changing the Context Path + +By default, Keycloak is served under context `/auth`. 
+This can be changed as follows: + +```yaml +contextPath: mycontext + +startupScripts: + # cli script that reconfigures WildFly + contextPath.cli: | + embed-server --server-config=standalone-ha.xml --std-out=echo + batch + {{- if ne .Values.contextPath "auth" }} + /subsystem=keycloak-server/:write-attribute(name=web-context,value={{ if eq .Values.contextPath "" }}/{{ else }}{{ .Values.contextPath }}{{ end }}) + {{- if eq .Values.contextPath "" }} + /subsystem=undertow/server=default-server/host=default-host:write-attribute(name=default-web-module,value=keycloak-server.war) + {{- end }} + {{- end }} + run-batch + stop-embedded-server + +livenessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +readinessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +The above YAML references introduces the custom value `contextPath` which is possible because `startupScripts`, `livenessProbe`, and `readinessProbe` are templated using the `tpl` function. +Note that it must not start with a slash. +Alternatively, you may supply it via CLI flag: + +```console +--set-string contextPath=mycontext +``` + +### Prometheus Metrics Support + +#### WildFly Metrics + +WildFly can expose metrics on the management port. +In order to achieve this, the environment variable `KEYCLOAK_STATISTICS` must be set. + +```yaml +extraEnv: | + - name: KEYCLOAK_STATISTICS + value: all +``` + +Add a ServiceMonitor if using prometheus-operator: + +```yaml +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing the ServiceMonitor and for adding custom Prometheus rules. 
+ +Add annotations if you don't use prometheus-operator: + +```yaml +service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9990" +``` + +#### Keycloak Metrics SPI + +Optionally, it is possible to add [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) via init container. + +A separate `ServiceMonitor` can be enabled to scrape metrics from the SPI: + +```yaml +extraServiceMonitor: + # If `true`, an additional ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Check out `values.yaml` for customizing this ServiceMonitor. + +Note that the metrics endpoint is exposed on the HTTP port. +You may want to restrict access to it in your ingress controller configuration. +For ingress-nginx, this could be done as follows: + +```yaml +annotations: + nginx.ingress.kubernetes.io/server-snippet: | + location ~* /auth/realms/[^/]+/metrics { + return 403; + } +``` + +## Why StatefulSet? + +The chart sets node identifiers to the system property `jboss.node.name` which is in fact the pod name. +Node identifiers must not be longer than 23 characters. +This can be problematic because pod names are quite long. +We would have to truncate the chart's fullname to six characters because pods get a 17-character suffix (e. g. `-697f8b7655-mf5ht`). +Using a StatefulSet allows us to truncate to 20 characters leaving room for up to 99 replicas, which is much better. +Additionally, we get stable values for `jboss.node.name` which can be advantageous for cluster discovery. +The headless service that governs the StatefulSet is used for DNS discovery via DNS_PING. + +## Upgrading + +### From chart < 10.0.0 + +* Keycloak is updated to 12.0.4 + +The upgrade should be seamless. +No special care has to be taken. + +### From chart versions < 9.0.0 + +The Keycloak chart received a major facelift and, thus, comes with breaking changes. +Opinionated stuff and things that are now baked into Keycloak's Docker image were removed. 
+Configuration is more generic making it easier to use custom Docker images that are configured differently than the official one. + +* Values are no longer nested under `keycloak`. +* Besides setting the node identifier, no CLI changes are performed out of the box +* Environment variables for the PostgreSQL dependency are set automatically if enabled. + Otherwise, no environment variables are set by default. +* Optionally enables creating RBAC resources with configurable rules (e. g. for KUBE_PING) +* PostgreSQL chart dependency is updated to 9.1.1 + +### From chart versions < 8.0.0 + +* Keycloak is updated to 10.0.0 +* PostgreSQL chart dependency is updated to 8.9.5 + +The upgrade should be seamless. +No special care has to be taken. + +### From chart versions < 7.0.0 + +Version 7.0.0 update breaks backwards-compatibility with the existing `keycloak.persistence.existingSecret` scheme. + +#### Changes in Configuring Database Credentials from an Existing Secret + +Both `DB_USER` and `DB_PASS` are always read from a Kubernetes Secret. +This is a requirement if you are provisioning database credentials dynamically - either via an Operator or some secret-management engine. + +The variable referencing the password key name has been renamed from `keycloak.persistence.existingSecretKey` to `keycloak.persistence.existingSecretPasswordKey` + +A new, optional variable for referencing the username key name for populating the `DB_USER` env has been added: +`keycloak.persistence.existingSecretUsernameKey`. + +If `keycloak.persistence.existingSecret` is left unset, a new Secret will be provisioned populated with the `dbUser` and `dbPassword` Helm variables. + +###### Example configuration: +```yaml +keycloak: + persistence: + existingSecret: keycloak-provisioned-db-credentials + existingSecretPasswordKey: PGPASSWORD + existingSecretUsernameKey: PGUSER + ... 
+``` +### From chart versions < 6.0.0 + +#### Changes in Probe Configuration + +Now both readiness and liveness probes are configured as strings that are then passed through the `tpl` function. +This allows for greater customizability of the readiness and liveness probes. + +The defaults are unchanged, but since 6.0.0 configured as follows: + +```yaml + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +#### Changes in Existing Secret Configuration + +This can be useful if you create a secret in a parent chart and want to reference that secret. +Applies to `keycloak.existingSecret` and `keycloak.persistence.existingSecret`. + +_`values.yaml` of parent chart:_ +```yaml +keycloak: + keycloak: + existingSecret: '{{ .Release.Name }}-keycloak-secret' +``` + +#### HTTPS Port Added + +The HTTPS port was added to the pod and to the services. +As a result, service ports are now configured differently. + + +### From chart versions < 5.0.0 + +Version 5.0.0 is a major update. + +* The chart now follows the new Kubernetes label recommendations: +https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +* Several changes to the StatefulSet render an out-of-the-box upgrade impossible because StatefulSets only allow updates to a limited set of fields +* The chart uses the new support for running scripts at startup that has been added to Keycloak's Docker image. +If you use this feature, you will have to adjust your configuration + +However, with the following manual steps an automatic upgrade is still possible: + +1. Adjust chart configuration as necessary (e. g. startup scripts) +1. 
Perform a non-cascading deletion of the StatefulSet which keeps the pods running +1. Add the new labels to the pods +1. Run `helm upgrade` + +Use a script like the following to add labels and to delete the StatefulSet: + +```console +#!/bin/sh + +release= +namespace= + +kubectl delete statefulset -n "$namespace" -l app=keycloak -l release="$release" --cascade=false + +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/name=keycloak +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/instance="$release" +``` + +**NOTE:** Version 5.0.0 also updates the Postgresql dependency which has received a major upgrade as well. +In case you use this dependency, the database must be upgraded first. +Please refer to the Postgresql chart's upgrading section in its README for instructions. diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..48d8f2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.8.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.1.1 diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..c84cc7b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md @@ -0,0 +1,625 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+ +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
+ | `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match against the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slave replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case `postgres` is the admin username). 
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. 
|`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the certificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use that data, please convert it to SQL and import it after `helm install` has finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation.
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. 
It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. 
Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart.
You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command, you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..b4d8828 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 0.3.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.3.1 diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md new file mode 100644 index 0000000..ab50967 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md @@ -0,0 +1,228 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR; + +```yaml +dependencies: + - name: common + version: 0.1.0 + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +**Names** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +**TplValues** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +**Secrets** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size of the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +#   password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +...
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +## Notable changes + +N/A diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..c0ea2c7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..ee6673a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if 
.global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $) }} +*/}} +{{- define "common.images.pullSecrets" -}} +{{- if .global }} +{{- if .global.imagePullSecrets }} +imagePullSecrets: + {{- range .global.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- else }} +{{- $pullSecrets := list }} +{{- range .images }} + {{- if .pullSecrets }} + {{- $pullSecrets = append $pullSecrets .pullSecrets }} + {{- end }} +{{- end }} +{{- if $pullSecrets }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..d6165a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 
0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..a936299 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). 
diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock new file mode 100644 index 0000000..1069b62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.3.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-07-15T00:56:02.067804177Z" diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..868eee6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt new file mode 
100644 index 0000000..6dec604 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,54 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.imxc.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.imxc.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace imxc --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . 
}}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . 
}} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..a7008a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,494 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- 
.Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but 
Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . 
| quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..b29ef60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..f21a976 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..6637867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . 
}} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6b7a317 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..b993c99 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..2a7b372 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..da0b3ab --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..b0c41b1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml new file mode 100644 index 0000000..ddd7d7c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: keycloak-saas +spec: + storageClassName: manual + capacity: + storage: 8Gi + accessModes: + - ReadWriteOnce + #- ReadWriteMany + hostPath: + #path: "/home/keycloak/keycloak" + path: /mnt/keycloak-postgresql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + #- imxc-worker1 + - {{ .Values.node.affinity }} + claimRef: + name: data-keycloak-saas-postgresql-0 + #namespace: auth + diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..6d3cf50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..b7daa2a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: imxc +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..c93dbe0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..17f7ff3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..3e643e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - imxc + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..a712a03 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,340 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if 
.Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..35c6293 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: 
{{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + #claimName: {{ tpl . 
$ }} + claimName: data-keycloak-saas-postgresql-0 +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..4913157 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..885c7bb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..e9fc504 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..a43670f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml @@ -0,0 +1,591 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: postgresql # bitnami/postgresql + tag: 11.8.0-debian-10-r61 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git 
a/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml new file mode 100644 index 0000000..5f831ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml @@ -0,0 +1,604 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + #registry: cdm-dev.exem-oss.org/keycloak + registry: 10.10.31.243:5000/keycloak # registry.openstacklocal:5000/keycloak + repository: keycloak-postgresql + tag: 11.8.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + #registry: cdm-dev.exem-oss.org + registry: 10.10.31.243:5000 # registry.openstacklocal:5000 + repository: minideb # keycloak/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data +#postgresqlDataDir: /var/lib/postgresql/data/pgdata + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + 
## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + 
requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +#persistentVolume nodeAffinity Value Require this value +node: + affinity: imxc-worker1 diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml new file mode 100644 index 0000000..10d1705 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml @@ -0,0 +1,38 @@ +extraEnv: | + - name: DB_VENDOR + value: h2 + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + annotations: + my-test-annotation: Test secret for {{ include "keycloak.fullname" . }} + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: false + +test: + enabled: true diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml new file mode 100644 index 0000000..e92c2c7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml @@ -0,0 +1,73 @@ +replicas: 2 + +podLabels: + test-label: test-label-value + +podAnnotations: + test-annotation: test-annotation-value-{{ .Release.Name }} + test-int-annotation: "12345" + +startupScripts: + hello.sh: | + #!/bin/sh + + echo '********************************************************************************' + echo '* *' + echo '* Hello from my startup script! *' + echo '* *' + echo '********************************************************************************' + +lifecycleHooks: | + postStart: + exec: + command: + - /bin/sh + - -c + - echo 'Hello from lifecycle hook!' + +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: KEYCLOAK_STATISTICS + value: all + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: true + persistence: + enabled: true + +test: + enabled: true diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.lock b/ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.lock new file mode 100644 index 0000000..4231a57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.1 +digest: sha256:33ee9e6caa9e519633071fd71aedd9de7906b9a9d7fb629eb814d9f72bb8d68e +generated: "2020-07-24T07:40:55.78753+02:00" diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.yaml new file mode 100644 index 0000000..f3409a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 9.1.1 + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli b/ansible/01_old/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli new 
file mode 100644 index 0000000..1469963 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli @@ -0,0 +1,13 @@ +embed-server --server-config=standalone-ha.xml --std-out=echo +batch + +echo Configuring node identifier + +## Sets the node identifier to the node name (= pod name). Node identifiers have to be unique. They can have a +## maximum length of 23 characters. Thus, the chart's fullname template truncates its length accordingly. +/subsystem=transactions:write-attribute(name=node-identifier, value=${jboss.node.name}) + +echo Finished configuring node identifier + +run-batch +stop-embedded-server diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt new file mode 100644 index 0000000..e76e064 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt @@ -0,0 +1,61 @@ +*********************************************************************** +* * +* Keycloak Helm Chart by codecentric AG * +* * +*********************************************************************** + +{{- if .Values.ingress.enabled }} + +Keycloak was installed with an Ingress and an be reached at the following URL(s): +{{ range $unused, $rule := .Values.ingress.rules }} + {{- range $rule.paths }} + - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $rule.host }}{{ . }} + {{- end }} +{{- end }} + +{{- else if eq "NodePort" .Values.service.type }} + +Keycloak was installed with a Service of type NodePort. +{{ if .Values.service.httpNodePort }} +Get its HTTP URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . 
}}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"http\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} +{{ if .Values.service.httpsNodePort }} +Get its HTTPS URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"https\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} + +{{- else if eq "LoadBalancer" .Values.service.type }} + +Keycloak was installed with a Service of type LoadBalancer + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace imxc service -w {{ include "keycloak.fullname" . }}' + +Get its HTTP URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpPort }}" + +Get its HTTPS URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpsPort }}" + +{{- else if eq "ClusterIP" .Values.service.type }} + +Keycloak was installed with a Service of type ClusterIP + +Create a port-forwarding with the following commands: + +export POD_NAME=$(kubectl get pods --namespace imxc -l "app.kubernetes.io/name={{ include "keycloak.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o name) +echo "Visit http://127.0.0.1:8080 to use your application" +kubectl --namespace imxc port-forward "$POD_NAME" 8080 + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..d019e17 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "keycloak.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate to 20 characters because this is used to set the node identifier in WildFly which is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "keycloak.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "keycloak.labels" -}} +helm.sh/chart: {{ include "keycloak.chart" . }} +{{ include "keycloak.selectorLabels" . 
}} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "keycloak.selectorLabels" -}} +app.kubernetes.io/name: {{ include "keycloak.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for the postgres requirement. +*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- $postgresContext := dict "Values" .Values.postgresql "Release" .Release "Chart" (dict "Name" "postgresql") -}} +{{ include "postgresql.fullname" $postgresContext }} +{{- end }} + +{{/* +Create the service DNS name. +*/}} +{{- define "keycloak.serviceDnsName" -}} +{{ include "keycloak.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "keycloak.ingressAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- print "networking.k8s.io/v1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml new file mode 100644 index 0000000..8fbb462 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml @@ -0,0 +1,14 @@ +{{- if .Values.startupScripts }} +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . 
}}-startup + labels: + {{- include "keycloak.labels" . | nindent 4 }} +data: + {{- range $key, $value := .Values.startupScripts }} + {{ $key }}: | + {{- tpl $value $ | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml new file mode 100644 index 0000000..c772b76 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.autoscaling.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "keycloak.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d749e24 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml @@ -0,0 +1,104 @@ +{{- $ingress := .Values.ingress -}} +{{- if $ingress.enabled -}} +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- if $ingress.console.enabled }} +--- +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }}-console + {{- with $ingress.console.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.console.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..5e7c7b6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "keycloak.fullname" . | quote }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.networkPolicy.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + ingress: + {{- with .Values.networkPolicy.extraFrom }} + - from: + {{- toYaml . | nindent 8 }} + ports: + - protocol: TCP + port: {{ $.Values.service.httpPort }} + - protocol: TCP + port: {{ $.Values.service.httpsPort }} + {{ range $.Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} + {{- end }} + - from: + - podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.service.httpPort }} + - protocol: TCP + port: {{ .Values.service.httpsPort }} + - protocol: TCP + port: {{ .Values.service.httpManagementPort }} + {{ range .Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..39cc390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml new file mode 100644 index 0000000..69af5e7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- with .Values.prometheusRule -}} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "keycloak.fullname" $ }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "keycloak.fullname" $ }} + rules: + {{- toYaml .rules | nindent 8 }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml new file mode 100644 index 0000000..9ca0a2b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create .Values.rbac.rules }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +rules: + {{- toYaml .Values.rbac.rules | nindent 2 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "keycloak.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/route.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/route.yaml new file mode 100644 index 0000000..9507d56 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/route.yaml @@ -0,0 +1,34 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $route.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $route.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $route.host }} + host: {{ tpl $route.host $ | quote }} +{{- end }} + path: {{ $route.path }} + port: + targetPort: http + to: + kind: Service + name: {{ include "keycloak.fullname" $ }}-http + weight: 100 + {{- if $route.tls.enabled }} + tls: + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + termination: {{ $route.tls.termination }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml new file mode 100644 index 0000000..c1cb796 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- range $nameSuffix, $values := .Values.secrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $nameSuffix }} + {{- with $values.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := $values.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +type: {{ default "Opaque" $values.type }} +{{- with $values.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with $values.stringData }} +stringData: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 2 }} + {{- end }} +{{- end }} +--- +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml new file mode 100644 index 0000000..0c22ec9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-headless + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: headless +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + selector: + {{- include "keycloak.selectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml new file mode 100644 index 0000000..c4a1dc9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-http + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.service.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: http +spec: + type: {{ .Values.service.type }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + protocol: TCP + - name: https + port: {{ .Values.service.httpsPort }} + targetPort: https + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpsNodePort }} + nodePort: {{ .Values.service.httpsNodePort }} + {{- end }} + protocol: TCP + - name: http-management + port: {{ .Values.service.httpManagementPort }} + targetPort: http-management + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpManagementNodePort }} + nodePort: {{ .Values.service.httpManagementNodePort }} + {{- end }} + protocol: TCP + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "keycloak.selectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..1d8f3f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "keycloak.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceAccount.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +imagePullSecrets: + {{- toYaml .Values.serviceAccount.imagePullSecrets | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..ba97f62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- range $key, $serviceMonitor := dict "wildfly" .Values.serviceMonitor "extra" .Values.extraServiceMonitor }} +{{- with $serviceMonitor }} +{{- if .enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $key }} + {{- with .namespace }} + namespace: {{ . }} + {{- end }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + {{- with .namespaceSelector }} + namespaceSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "keycloak.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/component: http + endpoints: + - port: {{ .port }} + path: {{ .path }} + interval: {{ .interval }} + scrapeTimeout: {{ .scrapeTimeout }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..8278986 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with .Values.statefulsetAnnotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.statefulsetLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + serviceName: {{ include "keycloak.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config-startup: {{ include (print .Template.BasePath "/configmap-startup.yaml") . | sha256sum }} + checksum/secrets: {{ tpl (toYaml .Values.secrets) . 
| sha256sum }} + {{- range $key, $value := .Values.podAnnotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + labels: + {{- include "keycloak.selectorLabels" . | nindent 8 }} + {{- if and .Values.postgresql.enabled (and .Values.postgresql.networkPolicy .Values.postgresql.networkPolicy.enabled) }} + {{ include "keycloak.postgresql.fullname" . }}-client: "true" + {{- end }} + {{- range $key, $value := .Values.podLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + spec: + {{- if or .Values.postgresql.enabled .Values.extraInitContainers }} + initContainers: + {{- if .Values.postgresql.enabled }} + - name: pgchecker + image: "{{ .Values.pgchecker.image.repository }}:{{ .Values.pgchecker.image.tag }}" + imagePullPolicy: {{ .Values.pgchecker.image.pullPolicy }} + securityContext: + {{- toYaml .Values.pgchecker.securityContext | nindent 12 }} + command: + - sh + - -c + - | + echo 'Waiting for PostgreSQL to become ready...' + + until printf "." && nc -z -w 2 {{ include "keycloak.postgresql.fullname" . }} {{ .Values.postgresql.service.port }}; do + sleep 2; + done; + + echo 'PostgreSQL OK ✓' + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + resources: + {{- toYaml .Values.pgchecker.resources | nindent 12 }} + {{- end }} + {{- with .Values.extraInitContainers }} + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: keycloak + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.command | nindent 12 }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- with .Values.lifecycleHooks }} + {{- tpl . 
$ | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_USER + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_USERNAME + - name: KEYCLOAK_PASSWORD + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_PASSWORD + {{- if .Values.postgresql.enabled }} + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: {{ include "keycloak.postgresql.fullname" . }} + - name: DB_PORT + value: {{ .Values.postgresql.service.port | quote }} + - name: DB_DATABASE + value: {{ .Values.postgresql.postgresqlDatabase | quote }} + - name: DB_USER + value: {{ .Values.postgresql.postgresqlUsername | quote }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.postgresql.fullname" . }} + key: postgresql-password + {{- end }} + {{- with .Values.extraEnv }} + {{- tpl . $ | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.extraEnvFrom }} + {{- tpl . $ | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: https + containerPort: 8443 + protocol: TCP + - name: http-management + containerPort: 9990 + protocol: TCP + {{- with .Values.extraPorts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + {{- range $key, $value := .Values.startupScripts }} + - name: startup + mountPath: "/opt/jboss/startup-scripts/{{ $key }}" + subPath: "{{ $key }}" + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- tpl . 
$ | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "keycloak.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + restartPolicy: {{ .Values.restartPolicy }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: themes-upper-directory + hostPath: + path: /root/oci/infra-set/keycloak/keycloak_theme/ + type: DirectoryOrCreate + {{- with .Values.startupScripts }} + - name: startup + configMap: + name: {{ include "keycloak.fullname" $ }}-startup + defaultMode: 0555 + items: + {{- range $key, $value := . }} + - key: {{ $key }} + path: {{ $key }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- tpl . $ | nindent 8 }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml new file mode 100644 index 0000000..8dda781 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml @@ -0,0 +1,50 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: test + helm.sh/hook-delete-policy: hook-succeeded +data: + test.py: | + import os + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions + from urllib.parse import urlparse + + print('Creating PhantomJS driver...') + driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log') + + base_url = 'http://{{ include "keycloak.fullname" . }}-http{{ if ne 80 (int .Values.service.httpPort) }}:{{ .Values.service.httpPort }}{{ end }}' + + print('Opening Keycloak...') + driver.get('{0}/auth/admin/'.format(base_url)) + + username = os.environ['KEYCLOAK_USER'] + password = os.environ['KEYCLOAK_PASSWORD'] + + username_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "username"))) + password_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "password"))) + login_button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "kc-login"))) + + print('Entering username...') + username_input.send_keys(username) + + print('Entering password...') + password_input.send_keys(password) + + print('Clicking login button...') + login_button.click() + + WebDriverWait(driver, 30).until(lambda driver: '/auth/admin/master/console/' in driver.current_url) + + print('Admin console visible. 
Login successful.') + + driver.quit() + + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml new file mode 100644 index 0000000..5b166f2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml @@ -0,0 +1,43 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: test + annotations: + helm.sh/hook: test +spec: + securityContext: + {{- toYaml .Values.test.podSecurityContext | nindent 4 }} + containers: + - name: keycloak-test + image: "{{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}" + imagePullPolicy: {{ .Values.test.image.pullPolicy }} + securityContext: + {{- toYaml .Values.test.securityContext | nindent 8 }} + command: + - python3 + args: + - /tests/test.py + env: + - name: KEYCLOAK_USER + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: user + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: password + volumeMounts: + - name: tests + mountPath: /tests + volumes: + - name: tests + configMap: + name: {{ include "keycloak.fullname" . 
}}-test + restartPolicy: Never +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/values.schema.json b/ansible/01_old/roles/cmoa_install/files/04-keycloak/values.schema.json new file mode 100644 index 0000000..47c2aa3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/values.schema.json @@ -0,0 +1,434 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "required": [ + "image" + ], + "definitions": { + "image": { + "type": "object", + "required": [ + "repository", + "tag" + ], + "properties": { + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + } + }, + "properties": { + "affinity": { + "type": "string" + }, + "args": { + "type": "array" + }, + "clusterDomain": { + "type": "string" + }, + "command": { + "type": "array" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "extraContainers": { + "type": "string" + }, + "extraEnv": { + "type": "string" + }, + "extraEnvFrom": { + "type": "string" + }, + "extraInitContainers": { + "type": "string" + }, + "extraPorts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "string" + }, + "extraVolumes": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "hostAliases": { + "type": "array" + }, + "image": { + "$ref": "#/definitions/image" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "string" 
+ } + } + } + } + }, + "servicePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "items": { + "type": "string" + } + }, + "secretName": { + "type": "string" + } + } + } + } + } + }, + "lifecycleHooks": { + "type": "string" + }, + "livenessProbe": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "pgchecker": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/image" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "type": "object" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "podSecurityContext": { + "type": "object" + }, + "postgresql": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheusRule": { + "type": "object" + }, + "serviceMonitor": { + "type": "object" + }, + "extraServiceMonitor": { + "type": "object" + }, + "readinessProbe": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "restartPolicy": { + "type": "string" + }, + "route": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "path": { + "type": "string" + }, + "tls": { + "type": "object" + } + } + }, + "secrets": { + "type": "object" + }, + 
"securityContext": { + "type": "object" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "extraPorts": { + "type": "array" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "httpNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpPort": { + "type": "integer" + }, + "httpsNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpsPort": { + "type": "integer" + }, + "httpManagementNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpManagementPort": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "type": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "sessionAffinity": { + "type": "string" + }, + "sessionAffinityConfig": { + "type": "object" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": "boolean" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "labels": { + "type": "object" + }, + "name": { + "type": "string" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "rules": { + "type": "array" + } + } + }, + "startupScripts": { + "type": "object" + }, + "statefulsetAnnotations": { + "type": "object" + }, + "statefulsetLabels": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "metrics": { + "type": "array" + }, + "behavior": { + "type": "object" + } + } + }, + "test": { + "type": "object", + "properties": { + 
"enabled": { + "type": "boolean" + }, + "image": { + "$ref": "#/definitions/image" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array" + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_install/files/04-keycloak/values.yaml b/ansible/01_old/roles/cmoa_install/files/04-keycloak/values.yaml new file mode 100644 index 0000000..a95521f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/04-keycloak/values.yaml @@ -0,0 +1,552 @@ +# Optionally override the fully qualified name +fullnameOverride: "imxc-keycloak" + +# Optionally override the name +nameOverride: "" + +# The number of replicas to create (has no effect if autoscaling enabled) +replicas: 2 + +image: + # The Keycloak image repository + #repository: cdm-dev.exem-oss.org/keycloak/keycloak + repository: 10.10.31.243:5000/cmoa3/keycloak + # Overrides the Keycloak image tag whose default is the chart version + tag: "11.0.1" + # The Keycloak image pull policy + pullPolicy: Always + +# Image pull secrets for the Pod +#imagePullSecrets: [] +# - name: myRegistrKeySecretName +imagePullSecrets: + - name: regcred + +# Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files +hostAliases: [] +# - ip: "1.2.3.4" +# hostnames: +# - "my.host.com" + +# Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links +enableServiceLinks: true + +# Pod management policy. One of `Parallel` or `OrderedReady` +podManagementPolicy: Parallel + +# Pod restart policy. One of `Always`, `OnFailure`, or `Never` +restartPolicy: Always + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + # Additional annotations for the ServiceAccount + annotations: {} + # Additional labels for the ServiceAccount + labels: {} + # Image pull secrets that are attached to the ServiceAccount + #imagePullSecrets: [] + imagePullSecrets: + - name: regcred + +rbac: + create: true + rules: + # RBAC rules for KUBE_PING + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + +# SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) +podSecurityContext: + fsGroup: 1000 + +# SecurityContext for the Keycloak container +securityContext: + runAsUser: 1000 + runAsNonRoot: true + +# Additional init containers, e. g. for providing custom themes +extraInitContainers: | + - name: theme-provider + image: 10.10.31.243:5000/cmoa3/theme-provider:latest + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme ..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +#extraInitContainers: "" + +# Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: "" + +# Lifecycle hooks for the Keycloak container +lifecycleHooks: | +# postStart: +# exec: +# command: +# - /bin/sh +# - -c +# - ls + +# Termination grace period in seconds for Keycloak shutdown. 
Clusters with a large cache might need to extend this to give Infinispan more time to rebalance +terminationGracePeriodSeconds: 60 + +# The internal Kubernetes cluster domain +clusterDomain: cluster.local + +## Overrides the default entrypoint of the Keycloak container +command: [] + +## Overrides the default args for the Keycloak container +#args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled", "-Dkeycloak.profile.feature.admin_fine_grained_authz=enabled"] +args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled"] + +# Additional environment variables for Keycloak +extraEnv: | + # HA settings + - name: PROXY_ADDRESS_FORWARDING + value: "true" + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + # postgresql settings + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: keycloak + - name: DB_USER + value: admin + - name: DB_PASSWORD + value: eorbahrhkswp +# - name: KEYCLOAK_USER +# value: keycloak +# - name: KEYCLOAK_PASSWORD +# value: keycloak +#extraEnv: "" + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS_COUNT + # value: "2" + # - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + # value: "2" +#extraEnv: | +# - name: JGROUPS_DISCOVERY_PROTOCOL +# value: dns.DNS_PING +# - name: JGROUPS_DISCOVERY_PROPERTIES +# value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' +# - name: CACHE_OWNERS_COUNT +# value: "2" +# - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT +# value: "2" +# Additional environment variables for Keycloak mapped from Secret or ConfigMap +extraEnvFrom: "" + +# Pod priority class name +#priorityClassName: "manual" + +# Pod affinity +affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 12 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + +#affinity: {} + +# Node labels for Pod assignment +nodeSelector: {} + +# Node taints to tolerate +tolerations: [] + +# Additional Pod labels +podLabels: {} + +# Additional Pod annotations +podAnnotations: {} + +# Liveness probe configuration +livenessProbe: | + httpGet: + path: /auth/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +# Readiness probe configuration +readinessProbe: | + httpGet: + path: /auth/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + +# Pod resource requests and limits +#resources: {} + # requests: + # cpu: "500m" + # memory: "1024Mi" + # limits: + # cpu: "500m" + # memory: "1024Mi" +resources: + requests: + memory: "200Mi" + cpu: "10m" + +# Startup scripts to run before Keycloak starts up +startupScripts: + # WildFly CLI script for configuring the node-identifier + keycloak.cli: | + {{- .Files.Get "scripts/keycloak.cli" }} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + +# Add additional volumes, e. g. 
for custom themes +extraVolumes: | + - name: theme + emptyDir: {} +#extraVolumes: "" + +# Add additional volumes mounts, e. g. for custom themes +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes +#extraVolumeMounts: "" + +# Add additional ports, e. g. for admin console or exposing JGroups ports +extraPorts: [] + +# Pod disruption budget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +# Annotations for the StatefulSet +statefulsetAnnotations: {} + +# Additional labels for the StatefulSet +statefulsetLabels: {} + +# Configuration for secrets that should be created +secrets: {} + # mysecret: + # type: {} + # annotations: {} + # labels: {} + # stringData: {} + # data: {} + +service: + # Annotations for headless and HTTP Services + annotations: {} + # Additional labels for headless and HTTP Services + labels: {} + # key: value + # The Service type + type: NodePort + # Optional IP for the load balancer. Used for services of type LoadBalancer only + loadBalancerIP: "" + # The http Service port + httpPort: 80 + # The HTTP Service node port if type is NodePort + httpNodePort: 31082 + # The HTTPS Service port + httpsPort: 8443 + # The HTTPS Service node port if type is NodePort + httpsNodePort: null + # The WildFly management Service port + httpManagementPort: 9990 + # The WildFly management Service node port if type is NodePort + httpManagementNodePort: 31990 + # Additional Service ports, e. g. for custom admin console + extraPorts: [] + # When using Service type LoadBalancer, you can restrict source ranges allowed + # to connect to the LoadBalancer, e. g. 
will result in Security Groups + # (or equivalent) with inbound source ranges allowed to connect + loadBalancerSourceRanges: [] + # Session affinity + # See https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace + sessionAffinity: "" + # Session affinity config + sessionAffinityConfig: {} + +ingress: + # If `true`, an Ingress is created + enabled: false + # The Service port targeted by the Ingress + servicePort: http + # Ingress annotations + annotations: {} + ## Resolve HTTP 502 error using ingress-nginx: + ## See https://www.ibm.com/support/pages/502-error-ingress-keycloak-response + # nginx.ingress.kubernetes.io/proxy-buffer-size: 128k + + # Additional Ingress labels + labels: {} + # List of rules for the Ingress + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - / + # TLS configuration + tls: + - hosts: + - keycloak.example.com + secretName: "" + + # ingress for console only (/auth/admin) + console: + # If `true`, an Ingress is created for console path only + enabled: false + # Ingress annotations for console ingress only + # Useful to set nginx.ingress.kubernetes.io/whitelist-source-range particularly + annotations: {} + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - /auth/admin/ + +## Network policy configuration +networkPolicy: + # If true, the Network policies are deployed + enabled: false + + # Additional Network policy labels + labels: {} + + # Define all other external allowed source + # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#networkpolicypeer-v1-networking-k8s-io + extraFrom: [] + +route: + # If `true`, an OpenShift Route is created + enabled: false + # Path for the Route + path: / + # Route annotations + annotations: {} + # Additional Route labels + labels: {} + # Host name for the Route + host: "" + # TLS configuration + tls: + # If `true`, TLS is enabled 
for the Route + enabled: false + # Insecure edge termination policy of the Route. Can be `None`, `Redirect`, or `Allow` + insecureEdgeTerminationPolicy: Redirect + # TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` + termination: edge + +pgchecker: + image: + # Docker image used to check Postgresql readiness at startup + #repository: cdm-dev.exem-oss.org/keycloak/busybox + #repository: {{ .Values.global.IMXC_REGISTRY }}/keycloak/busybox + repository: 10.10.31.243:5000/cmoa3/busybox + # Image tag for the pgchecker image + tag: 1.32 + # Image pull policy for the pgchecker image + pullPolicy: Always + # SecurityContext for the pgchecker contai/docker.ner + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + # Resource requests and limits for the pgchecker container + resources: + requests: + cpu: "10m" + memory: "16Mi" + limits: + cpu: "10m" + memory: "16Mi" + +postgresql: + # If `true`, the Postgresql dependency is enabled + enabled: false + # PostgreSQL User to create + postgresqlUsername: keycloak + # PostgreSQL Password for the new user + postgresqlPassword: keycloak + # PostgreSQL Database to create + postgresqlDatabase: keycloak + # PostgreSQL network policy configuration + networkPolicy: + enabled: false + +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /metrics + # The Service port at which metrics are served + port: http-management + +extraServiceMonitor: + # If 
`true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /auth/realms/master/metrics + # The Service port at which metrics are served + port: http + +prometheusRule: + # If `true`, a PrometheusRule resource for the prometheus-operator is created + enabled: false + # Annotations for the PrometheusRule + annotations: {} + # Additional labels for the PrometheusRule + labels: {} + # List of rules for Prometheus + rules: [] + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
+ # expr: | + # ( + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m] + # ) + # ) + # / + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m] + # ) + # ) + # ) * 100 > 1 + # for: 5m + # labels: + # severity: warning + +autoscaling: + # If `true`, a autoscaling/v2beta2 HorizontalPodAutoscaler resource is created (requires Kubernetes 1.18 or above) + # Autoscaling seems to be most reliable when using KUBE_PING service discovery (see README for details) + # This disables the `replicas` field in the StatefulSet + enabled: false + # Additional HorizontalPodAutoscaler labels + labels: {} + # The minimum and maximum number of replicas for the Keycloak StatefulSet + minReplicas: 3 + maxReplicas: 10 + # The metrics to use for scaling + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + # The scaling policy to use. This will scale up quickly but only scale down a single Pod per 5 minutes. + # This is important because caches are usually only replicated to 2 Pods and if one of those Pods is terminated this will give the cluster time to recover. 
+ behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 300 + +test: + # If `true`, test resources are created + enabled: false + image: + # The image for the test Pod + #repository: docker.io/unguiculus/docker-python3-phantomjs-selenium + repository: 10.10.31.243:5000/docker-python3-phantomjs-selenium + # The tag for the test Pod image + tag: v1 + # The image pull policy for the test Pod image + pullPolicy: IfNotPresent + # SecurityContext for the entire test Pod + podSecurityContext: + fsGroup: 1000 + # SecurityContext for the test container + securityContext: + runAsUser: 1000 + runAsNonRoot: true + diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git 
a/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh new file mode 100644 index 0000000..78a9962 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh @@ -0,0 +1,17 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + /sbin/tini -- java -Djava.security.egd=file:/dev/./urandom -jar /app.jar + #java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init.json b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + 
"offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": 
"impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + 
"id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + 
"description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + 
"composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + 
"webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": 
"openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + 
"notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + 
"optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": 
"bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + 
"userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + 
"userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + 
"protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + 
"protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": 
"4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": 
"zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": 
false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { 
+ "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + 
"strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + 
"config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by 
Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": 
"667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml new file mode 100644 index 0000000..fb8fe7b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-server + namespace: imxc +spec: + selector: + matchLabels: + app: auth + replicas: 1 + template: + metadata: + labels: + app: auth + spec: + initContainers: + - name: init-resource + image: {{ .Values.global.IMXC_IN_REGISTRY }}/init-resource:latest + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ['chmod -R 777 /scripts; cp /scripts/init.json /tmp/init.json'] + volumeMounts: + - name: init + mountPath: /tmp + containers: + - name: auth-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/auth-server:{{ 
.Values.global.AUTH_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-auth-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # imxc-api-server configuration + - name: IMXC_API-SERVER-URL + value: http://imxc-api-service:8080 + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_REPO + value: debug + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_AUTH_AUTHENTICATION_USER_SERVICE + value: debug + # 현대카드는 커스텀으로 해당 값 추가. keycloak만 사용(true), keycloak+내부db 사용(false) + - name: IMXC_KEYCLOAK_ENABLED + value: "true" + + volumeMounts: + - name: init + mountPath: /tmp + resources: + requests: + memory: "200Mi" + cpu: "10m" + + volumes: + - name: init + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: auth-server-service + namespace: imxc +spec: + type: ClusterIP + selector: + app: auth + ports: + - protocol: TCP + port: 8480 + # nodePort: 15016 diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml new file mode 100644 index 0000000..cbbee9a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + selector: + matchLabels: + app: datagate + replicas: 2 + template: + metadata: + labels: + app: datagate + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/datagate:{{ .Values.global.DATAGATE_VERSION }} + imagePullPolicy: IfNotPresent + name: 
datagate + ports: + - containerPort: 50051 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! + - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: kafka-broker:9094 + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "2000m" + memory: "1Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + ports: + - name: datagate-grpc + port: 50051 + protocol: TCP + targetPort: 50051 + nodePort: 30051 + - name: datagate-http + port: 14268 + targetPort: 14268 +# nodePort: 31268 + - name: datagate-readiness + port: 14269 + targetPort: 14269 + selector: + app: datagate + type: NodePort diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml new file mode 100644 index 0000000..45c3d41 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml @@ -0,0 +1,331 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + selector: + matchLabels: + app: metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: metric-agent + spec: + containers: + - name: metric-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-agent:{{ .Values.global.METRIC_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14271 + - containerPort: 14272 + args: + - --config.file=/etc/metric-agent/metric-agent.yml + env: + - name: STORAGE_TYPE + value: datagate + - name: DATAGATE + value: 
datagate:50051 + - name: CLUSTER_ID + value: cloudmoa +# - name: USER_ID +# value: mskim@ex-em.com + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "300m" + volumes: + - name: config-volume + configMap: + name: metric-agent-config + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + ports: + - name: metric + port: 14271 + targetPort: 14271 + selector: + app: metric-agent + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metric-agent-config + namespace: imxc +data: + metric-agent.yml: | + global: + scrape_interval: 10s + evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. + + scrape_configs: + - job_name: 'kubernetes-kubelet' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 'cloudmoa' + - target_label: xm_entity_type + replacement: 'Node' + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + + - job_name: 'kubernetes-node-exporter' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + 
relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: 'kubernetes-(.*)' + replacement: '${1}' + target_label: name + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Node' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: 'kubernetes-cadvisor' + scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Container' + +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + + {{- else }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +{{- end }} + # CLOUD-8671 | 데이터 필터링 설정 추가 + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop + + - job_name: 'kafka-consumer' + metrics_path: /remote_prom + scrape_interval: 5s + scrape_timeout: 5s + scheme: kafka + static_configs: + - targets: ['kafka-broker:9094'] + params: + #server_addrs: ['broker.default.svc.k8s:9094'] + server_addrs: ['kafka-broker:9094'] + encoding: [proto3] + contents: [remote_write] + compression: [snappy] + group: [remote-write-consumer] + workers: [50] + + # job for API server (SpringBoot) commented by ersione 2019-09-19 + - job_name: 'imxc-api' + metrics_path: '/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: ['imxc-api-service:8080'] + - job_name: 'imxc-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] + #- job_name: 'imxc-auth' + # metrics_path: '/actuator/prometheus' + # scrape_interval: 15s + # static_configs: + # - targets: ['auth-server-service:8480'] + + + + - job_name: 'alertmanager-exporter' + metrics_path: '/metrics' + scrape_interval: 5s + static_configs: + - targets: ['alertmanager:9093'] + + + # modified by seungtak choi 2020-02-18 + - job_name: 'cmoa-collector' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + 
namespaces: + names: + - imxc + relabel_configs: + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: cmoa-collector + + # added by dwkim 2021-03-15 + - job_name: 'elasticsearch' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + relabel_configs: + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_pod_node_name] + target_label: xm_node_id + - source_labels: [__meta_kubernetes_namespace] + target_label: xm_namespace + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: es-exporter-elasticsearch-exporter + + # kafka-exporter prometheus 수집 룰 추가 + - job_name: 'kafka-exporter' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9308' + + # kafka-jmx-exporter configuration yaml 수집룰 추가 + - job_name: 'kafka-jmx' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9010' + + # job for API Server(Spring Cloud Notification Server) commented by hjyoon 2022-01-26 + - job_name: 'cmoa-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml new file mode 100644 index 0000000..3d7acc8 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + selector: + matchLabels: + app: metric-collector + replicas: 3 + template: + metadata: + labels: + app: metric-collector + spec: + containers: + - name: metric-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-collector:{{ .Values.global.METRIC_COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14270 + env: + - name: KAFKA_CONSUMER_BROKERS + value: kafka-broker:9094 + - name: HTTP_PUSH + value: http://base-cortex-nginx/api/v1/push + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + ports: + - name: metric + port: 14270 + targetPort: 14270 + selector: + app: metric-collector diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml new file mode 100644 index 0000000..b20fed2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-batch + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-batch +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-batch + template: + metadata: + labels: + app: cmoa-kube-info-batch + spec: + containers: + - name: cmoa-kube-info-batch + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-batch:{{ .Values.global.KUBE_INFO_BATCH_VERSION }} + imagePullPolicy: Always + env: + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ 
.Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: DELETE_HOUR + value: '{{ .Values.global.DELETE_HOUR }}' diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml new file mode 100644 index 0000000..cad91b9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-connector + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-connector +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-connector + template: + metadata: + labels: + app: cmoa-kube-info-connector + spec: + containers: + - name: cmoa-kube-info-connector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-connector:{{ .Values.global.KUBE_INFO_CONNECTOR_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_GROUP_ID + value: cmoa-kube-info-connector + - name: KAFKA_SERVER + value: kafka:9092 + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: MAX_POLL_RECORDS_CONFIG + value: "300" + - name: MAX_POLL_INTERVAL_MS_CONFIG + value: "600000" + - name: SESSION_TIMEOUT_MS_CONFIG + value: "60000" + - name: MAX_PARTITION_FETCH_BYTES_CONFIG + value: "5242880" diff --git 
a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml new file mode 100644 index 0000000..6f77ee5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-flat + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-flat +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-flat + template: + metadata: + labels: + app: cmoa-kube-info-flat + spec: + containers: + - name: cmoa-kube-info-flat + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-flat:{{ .Values.global.KUBE_INFO_FLAT_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_SERVER + value: kafka:9092 + - name: KAFKA_INPUT_TOPIC + value: {{ .Values.global.KAFKA_INPUT_TOPIC }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + resources: + limits: + memory: 1Gi + requests: + memory: 200Mi diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + 
targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml new file mode 100644 index 0000000..5ffd9c2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Service +metadata: + name: eureka + namespace: imxc + labels: + app: eureka +spec: + type: NodePort + ports: + - port: 8761 + targetPort: 8761 + nodePort: 30030 + name: eureka + selector: + app: eureka +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: eureka + namespace: imxc +spec: + serviceName: 'eureka' + replicas: 3 + selector: + matchLabels: + app: eureka + template: + metadata: + labels: + app: eureka + spec: + containers: + - name: eureka + image: {{ .Values.global.IMXC_IN_REGISTRY }}/eureka-server:{{ .Values.global.EUREKA_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8761 + #resources: + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "1200Mi" + # cpu: "500m" + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/ + - name: JVM_OPTS + value: "-Xms1g -Xmx1g" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "20m" diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml new file mode 100644 index 0000000..de967a6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml @@ -0,0 +1,245 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-api-service + namespace: imxc +spec: + type: NodePort + 
selector: + app: imxc-api + ports: + - protocol: TCP + name: api + port: 8080 + targetPort: 8080 + nodePort: 32080 + - protocol: TCP + name: netty + port: 10100 + targetPort: 10100 + nodePort: 31100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-api + namespace: imxc + labels: + app: imxc-api +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-api + template: + metadata: + labels: + app: imxc-api + build: develop + spec: + securityContext: + #runAsNonRoot: true + runAsUser: 1577 + initContainers: + - name: cloudmoa-api-permission-fix + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 +# - sh +# - -c +# - "chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log" + volumeMounts: + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + containers: + - name: imxc-api + image: {{ .Values.global.IMXC_IN_REGISTRY }}/api-server:{{ .Values.global.API_SERVER_VERSION }} + resources: + requests: + cpu: 200m + memory: 500Mi + limits: + cpu: 2000m + memory: 5000Mi + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-api-server.sh" | quote }}] + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + - name: SPRING_DATAGATE_URLS + value: "{{ .Values.global.DATAGATE_INSIDE_IP }}" + - name: SPRING_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_INSIDE_PORT }}" + - name: SPRING_REDIS_URLS + value: {{ .Values.global.REDIS_URLS }} + - name: SPRING_REDIS_PORT + value: "{{ .Values.global.REDIS_PORT }}" + - name: SPRING_REDIS_PASSWORD + value: {{ .Values.global.REDIS_PASSWORD }} + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + - name: SPRING_BOOT_ADMIN_CLIENT_URL + 
value: http://{{ .Values.global.IMXC_ADMIN_SERVER_DNS }}:8888 + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_NAME + value: Intermax Cloud API Server + - name: SPRING_BOOT_ADMIN_CLIENT_ENABLED + value: "false" + - name: OPENTRACING_JAEGER_ENABLED + value: "false" + - name: SPRING_JPA_PROPERTIES_HIBERNATE_GENERATE_STATISTICS + value: "false" + - name: IMXC_REPORT_ENABLED + value: "true" + - name: IMXC_ALERT_PERSIST + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_ENVIRONMENT + value: Demo + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_PREFERIP + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_PODNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: SPRING_BOOT_ADMIN_CLIENT_AUTODEREGISTRATION + value: "true" + - name: SPRING_JPA_HIBERNATE_DDL-AUTO + value: validate + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + - name: KEYCLOAK_RESOURCE + value: "{{ .Values.global.KEYCLOAK_RESOURCE }}" + - name: SPRING_KEYCLOAK_MASTER_USERNAME + value: "{{ .Values.global.KEYCLOAK_MASTER_USERNAME }}" + - name: SPRING_KEYCLOAK_MASTER_PASSWORD + value: "{{ .Values.global.KEYCLOAK_MASTER_PASSWORD }}" + - name: SPRING_LDAP_USE + value: "{{ .Values.global.IMXC_LDAP_USE }}" + - name: TIMEZONE + value: Asia/Seoul + - name: IMXC_PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: IMXC_PROMETHEUS_NAMESPACE + value: "imxc" + - name: LOGGING_LEVEL_ROOT + value: info + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: 
$[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + #R30020210730 추가 :: 현대카드는 true로 설정 + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-HOST + value: "exemmail1.ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PORT + value: "587" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-USERNAME + value: "imxc@ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PASSWORD + value: "1234" + - name: IMXC_ALERT_NOTIFICATION_MAIL_PROTOCOL + value: "smtp" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-REQ + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-ENB + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_SMTP-AUTH + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_DEBUG + value: "true" + - name: IMXC_ANOMALY_BLACK-LIST + value: "false" + - name: IMXC_VERSION_SAAS + value: "false" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_API_SERVER_KUBERNETES_SERVICE + value: info + - name: IMXC_WEBSOCKET_SCHEDULE_PERIOD_5SECOND + value: "30000" + - name: IMXC_CACHE_INFO_1MCACHE + value: "0 0/1 * * * ?" + - name: IMXC_EXECUTION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_PERMISSION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_CODE-LOG_USE + value: "false" + - name: IMXC_PORTAL_INFO_URL + value: "{{ .Values.global.IMXC_PORTAL_INFO_URL }}" + # Do not remove below rows related to AGENT-INSTALL. Added by youngmin 2021-03-29. 
+ - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_IP + value: {{ .Values.global.KAFKA_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_INTERFACE-PORT + value: "{{ .Values.global.KAFKA_INTERFACE_PORT }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_IP + value: {{ .Values.global.IMXC_API_SERVER_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_NETTY-PORT + value: "{{ .Values.global.APISERVER_NETTY_PORT }}" + - name: AGENT-INSTALL_REGISTRY_URL + value: {{ .Values.global.IMXC_IN_REGISTRY }} + - name: AGENT-INSTALL_IMAGE_TAG + value: {{ .Values.global.AGENT_IMAGE_TAG }} + - name: AGENT-INSTALL_JAEGER_AGENT_CLUSTERIP + value: {{ .Values.global.JAEGER_AGENT_CLUSTERIP }} + - name: AGENT-INSTALL_JAEGER_JAVA-SPECIALAGENT-CLASSPATH + value: {{ .Values.global.JAEGER_JAVA_SPECIALAGENT_CLASSPATH }} + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_IP + value: "{{ .Values.global.DATAGATE_OUTSIDE_IP }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_OUTSIDE_PORT }}" + - name: IMXC_REST-CONFIG_MAX-CON + value: "200" + - name: IMXC_REST-CONFIG_MAX-CON-ROUTE + value: "65" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + # Elasticsearch for Security + - name: SPRING_ELASTIC_SSL_USERNAME + value: "{{ .Values.global.CMOA_ES_ID }}" + - name: SPRING_ELASTIC_SSL_PASSWORD + value: "{{ .Values.global.CMOA_ES_PW }}" + - name: IMXC_BACK-LOGIN_ENABLED + value: "{{ .Values.global.BACKLOGIN }}" + volumeMounts: + - mountPath: /var/log/imxc-audit.log + name: auditlog + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + volumes: + - name: auditlog + hostPath: + path: {{ .Values.global.AUDITLOG_PATH }}/imxc-audit.log + type: FileOrCreate + - name: notification-upper-directory + hostPath: + path: /home/ + type: DirectoryOrCreate + - name: notification-directory + hostPath: + path: 
/home/cloudmoa_event.log + type: FileOrCreate diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml new file mode 100644 index 0000000..e125243 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-collector + template: + metadata: + labels: + app: cmoa-collector + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: cmoa-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cmoa-collector:{{ .Values.global.COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 500m + memory: 2500Mi + ports: + - containerPort: 12010 + env: + - name: LOCATION + value: Asia/Seoul + - name: KAFKA_SERVER + value: kafka:9092 + - name: ELASTICSEARCH + value: elasticsearch:9200 +# - name: PROMETHEUS +# value: nginx-cortex/prometheus + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! 
+ - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: CMOA_ES_ID + value: {{ .Values.global.CMOA_ES_ID }} + - name: CMOA_ES_PW + value: {{ .Values.global.CMOA_ES_PW }} + resources: + requests: + cpu: "300m" + memory: "1500Mi" + limits: + cpu: "500m" + memory: "2500Mi" +- apiVersion: v1 + kind: Service + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + ports: + - name: cmoa-collector-exporter + port: 12010 + targetPort: 12010 + selector: + app: cmoa-collector + diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml new file mode 100644 index 0000000..99c7a5b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: noti-server + namespace: imxc +spec: + selector: + matchLabels: + app: noti + replicas: 1 + template: + metadata: + labels: + app: noti + spec: + containers: + - name: noti-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/notification-server:{{ .Values.global.NOTI_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-noti-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: {{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }} + - name: KEYCLOAK_REALM + value: exem + + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + + # postgres configuration + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + + # redis configuration + - name: SPRING_REDIS_HOST + value: redis-master + - name: SPRING_REDIS_PORT + value: "6379" + - name: SPRING_REDIS_PASSWORD + value: dkagh1234! 
+ + # elasticsearch configuration + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + + # file I/O configuration + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + + # rabbitmq configuration + - name: IMXC_RABBITMQ_HOST + value: base-rabbitmq + - name: IMXC_RABBITMQ_PORT + value: "61613" + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: IMXC_RABBITMQ_SYSTEM_ID + value: "user" + - name: IMXC_RABBITMQ_SYSTEM_PASSWORD + value: "eorbahrhkswp" + + # api-server configuration + - name: IMXC_API-SERVER-URL + value: "http://imxc-api-service:8080" + + # cortex integration + - name: SPRING_CORTEX_URLS + value: base-cortex-configs + - name: SPRING_CORTEX_PORT + value: "8080" + + # alert webhook + - name: IMXC_ALERT_WEBHOOK_URLS + value: http://noti-server-service:8080/alert + + # etc configuration + - name: IMXC_PROMETHEUS_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + - name: IMXC_ALERT_KUBERNETES_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: noti-server-service + namespace: imxc +spec: + type: NodePort + selector: + app: noti + ports: + - protocol: TCP + port: 8080 + nodePort: 31083 diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml new file mode 100644 index 
0000000..b3223e5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-stream-txntrend-deployment + namespace: imxc + labels: + app: kafka-stream-txntrend +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-stream-txntrend + template: + metadata: + labels: + app: kafka-stream-txntrend + spec: + containers: + - name: kafka-stream-txntrend + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-stream-txntrend:{{ .Values.global.KAFKA_STREAM_VERSION }} + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_KAFKA_HOST + value: kafka-broker:9094 + - name: SERVICE_STREAM_OUTPUT + value: jspd_txntrend diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml new file mode 100644 index 0000000..80476a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml @@ -0,0 +1,107 @@ +{{ if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{ end }} +kind: ClusterRoleBinding +metadata: + name: topology-agent + namespace: imxc + labels: + k8s-app: topology-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: topology-agent + namespace: imxc +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: topology-agent + namespace: imxc + labels: + app: topology-agent +spec: + selector: + matchLabels: + app: topology-agent + template: + metadata: + labels: + app: topology-agent + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # below appended + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: 
topology-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/topology-agent:{{ .Values.global.TOPOLOGY_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + env: + - name: CLUSTER_ID + value: cloudmoa + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATAGATE + value: datagate:50051 + - name: LOG_RNAME_USE + value: "false" + - name: LOG_LEVEL + value: "DEBUG" + - name: CLOUDMOA_SETTING_PATH + value: /home/cloudmoa/setting/ + resources: + requests: + memory: "125Mi" + cpu: "100m" + limits: + memory: "600Mi" + cpu: "500m" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml new file mode 100644 index 0000000..79969d7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-deployment + namespace: imxc + labels: + app: cloud +spec: + selector: + matchLabels: + app: cloud + replicas: 1 + template: + metadata: + labels: + app: cloud + spec: + containers: + - env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ZIPKIN_BASE-URL + value: http://zipkin-service:9411 + - name: 
LOGGING_LEVEL_COM_EXEM_CLOUD_ZUULSERVER_FILTERS_AUTHFILTER + value: info + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + name: zuul + image: {{ .Values.global.IMXC_IN_REGISTRY }}/zuul-server:{{ .Values.global.ZUUL_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + #- containerPort: 6831 + #protocol: UDP + #resources: + # requests: + # memory: "256Mi" + # cpu: "344m" + # limits: + # memory: "1Gi" + # cpu: "700m" + resources: + requests: + memory: "200Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: zuul + namespace: imxc + labels: + app: cloud +spec: + type: NodePort + selector: + app: cloud + ports: + - port: 8080 + targetPort: 8080 + nodePort: 31081 diff --git a/ansible/01_old/roles/cmoa_install/files/05-imxc/values.yaml b/ansible/01_old/roles/cmoa_install/files/05-imxc/values.yaml new file mode 100644 index 0000000..07c9a47 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/05-imxc/values.yaml @@ -0,0 +1,157 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + IMXC_LDAP_USE: false + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AUDITLOG_PATH: /var/log + KAFKA_IP: kafka-broker + # 로드밸런서 안 쓴다고 가정했을때 입니다.. 
+ KAFKA_INTERFACE_PORT: 9094 + APISERVER_NETTY_PORT: 10100 + #REGISTRY_URL: cdm-dev.exem-oss.org:5050 + #REGISTRY_URL: 10.10.31.243:5000/cmoa + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AGENT_IMAGE_TAG: rel0.0.0 + # Jaeger 관련변수 + JAEGER_AGENT_CLUSTERIP: 10.98.94.198 + JAEGER_JAVA_SPECIALAGENT_CLASSPATH: classpath:/install/opentracing-specialagent-1.7.4.jar + # added by DongWoo Kim 2021-06-21 + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_MASTER_USERNAME: admin + KEYCLOAK_MASTER_PASSWORD: admin + IMXC_PORTAL_INFO_URL: + KEYCLOAK_REALM: exem + # added by EunHye Kim 2021-08-25 + #DATAGATE_URLS: datagate + #DATAGATE_IP: 111.111.111.111 + #DATAGATE_PORT: 14268 + DATAGATE_INSIDE_IP: datagate + DATAGATE_INSIDE_PORT: 14268 + DATAGATE_OUTSIDE_IP: 111.111.111.111 + DATAGATE_OUTSIDE_PORT: 30051 + REDIS_URLS: redis-master + REDIS_PORT: 6379 + REDIS_PASSWORD: dkagh1234! + # added by DongWoo Kim 2021-08-31 (version of each module) + DATAGATE_VERSION: rel0.0.0 + #ADMIN_SERVER_VERSION: v1.0.0 + #API_SERVER_VERSION: CLOUD-172 + API_SERVER_VERSION: rel0.0.0 + COLLECTOR_VERSION: rel0.0.0 + #release-3.3.0 + TOPOLOGY_AGENT_VERSION: rel0.0.0 + METRIC_COLLECTOR_VERSION: rel0.0.0 + #v1.0.0 + METRIC_AGENT_VERSION: rel0.0.0 + # spring cloud + ZUUL_SERVER_VERSION: rel0.0.0 + #CMOA-1269 + EUREKA_SERVER_VERSION: rel0.0.0 + AUTH_SERVER_VERSION: rel0.0.0 + NOTI_SERVER_VERSION: rel0.0.0 + KAFKA_STREAM_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 + KUBE_INFO_FLAT_VERSION: rel0.0.0 + KUBE_INFO_BATCH_VERSION: rel0.0.0 + KUBE_INFO_CONNECTOR_VERSION: rel0.0.0 + + + CMOA_MANUAL_PORT: 31090 + + + # Keycloak + #KEYCLOAK_VERSION: v1.0.0 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + #IMXC_REGISTRY: 10.10.31.243:5000 + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + + # namespace 추가 + IMXC_NAMESPACE: imxc + + # ZUUL 8080으로 열어놓을것 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + JDBC_KIND: 'postgres' + JDBC_SERVER: 
'postgres:5432' + JDBC_DB: 'postgresdb' + JDBC_USER: 'admin' + JDBC_PWD: 'eorbahrhkswp' + + KAFKA_INPUT_TOPIC: 'kubernetes_info' + + TABLE_PREFIX: 'cmoa_' + BLACK_LIST: 'configmap_base,cronjob_active,endpoint_base,endpoint_addresses,endpoint_notreadyaddresses,endpoint_ports,event_base,node_image,persistentvolume_base,persistentvolumeclaim_base,pod_volume,resourcequota_base,resourcequota_scopeselector' + DELETE_HOUR: '15' + BACKLOGIN: false diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh 
b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + 
"ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, 
+ "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": 
"${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": 
"8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + 
"otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + 
"standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + 
"surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + 
"authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + 
"defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": 
"fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + 
"id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add 
allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + 
"display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": 
"lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": 
"String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + 
{ + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + 
"xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + 
"saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + 
"requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + 
"autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml new file mode 100644 index 0000000..9fa97ed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config-jaeger + namespace: imxc +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + // Env Settings servletURL + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ 
.Values.global.ZUUL_SERVER_PORT }}", + demoServletURL: "{{ .Values.global.DEMO_SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings interMaxURL + interMaxURL: "http://{{ .Values.global.INTERMAX_IP }}:8080/intermax/?", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_UI_VERSION }}', + UI_build_ver: '{{ .Values.global.UI_SERVER_VERSION }}', + maxSelectionSize: 30, + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + healthIndicatorStateInfo: [ + { + state: "critical", + // max: 1.0, + // over: 0.8, + max: 100, + over: 80, + text: "Critical", + color: "#ff4040", + level: 4, + }, { + state: "warning", + // max: 0.8, + // over: 0.5, + max: 80, + over: 50, + text: "Warning", + color: "#ffa733", + level: 3, + }, { + state: "attention", + // max: 0.5, + // over: 0.0, + max: 50, + over: 0, + text: "Attention", + // color: "#B4B83D", + color: "#1cbe85", + level: 2, + }, { + state: "normal", + max: 0, + over: 0, + text: "Normal", + // color: "#64B87D", + color: "#24b0ed", + level: 1, + }, + ] + }; + + diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml new file mode 100644 index 0000000..a0d959f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml @@ -0,0 +1,63 @@ 
+--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service-jaeger + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui-jaeger + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31084 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui-jaeger + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui-jaeger + template: + metadata: + labels: + app: imxc-ui-jaeger + spec: + containers: + - name: imxc-ui-jaeger + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config-jaeger + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config-jaeger diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml new file mode 100644 index 0000000..bd63730 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 111.111.111.111 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 111.111.111.111 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 111.111.111.111 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml @@ 
-0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + 
"ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, 
+ "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": 
"${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": 
"8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + 
"otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + 
"standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + 
"surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + 
"authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + 
"defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": 
"fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + 
"id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add 
allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + 
"display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": 
"lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": 
"String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + 
{ + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + 
"xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + 
"saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + 
"requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + 
"autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml new file mode 100644 index 0000000..e47ff66 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config + namespace: imxc + +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + // Env Settings servletURL + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", 
+ // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + // Env Settings interMaxURL + // ex) ~/intermax/?paConnect=1&paType=ResponseInspector&fromTime=1556096539206&toTime=1556096599206&serverName=jeus89 + interMaxURL: "", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_VERSION }}', + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + // refreshTime: '4', // 리로드 주기 설정 4로 설정시 새벽 4시에 리로드 하게 됨 + intervalTime: { // 5의 배수여야만 함 + short: 5, + medium: 10, + long: 60, + }, + // excludedContents: { + // anomalyScoreSettings: true, // entity black list setting page + // anomalyScoreInSidebar: true, // anomaly score in side bar + // }, + serviceTraceAgentType: 'jspd' + }; diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml new file mode 100644 index 0000000..35c4b61 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui + template: + metadata: + labels: + app: imxc-ui + spec: 
+ containers: + - name: imxc-ui + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config diff --git a/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml new file mode 100644 index 0000000..bd63730 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 111.111.111.111 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 111.111.111.111 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 111.111.111.111 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/ansible/01_old/roles/cmoa_install/files/ip_change b/ansible/01_old/roles/cmoa_install/files/ip_change new file mode 100755 index 0000000..ac13cc7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/ip_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" 
"$@"; exit; fi + +before_ip=$1 +after_ip=$2 +grep_path=$3 + +if [[ $before_ip == '' || $after_ip == '' ]]; then + echo '[Usage] $0 {before_ip} {after_ip}' + exit +fi + +grep -rn ${before_ip} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_ip}/${after_ip}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/k8s_status b/ansible/01_old/roles/cmoa_install/files/k8s_status new file mode 100755 index 0000000..16b3c61 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/k8s_status @@ -0,0 +1,86 @@ +#! /usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, subprocess, io, time +from kubernetes import client, config +def debug_print(msg): + print(" # ", msg) + +def k8s_conn(KUBE_CONFIG_PATH): + config.load_kube_config( + config_file=KUBE_CONFIG_PATH + ) + k8s_api = client.CoreV1Api() + + return k8s_api + +def k8s_get_pod(k8s_api, namespace, target=''): + pretty=False + watch=False + timeout_seconds=30 + api_response = k8s_api.list_namespaced_pod(namespace, pretty=pretty, timeout_seconds=timeout_seconds, watch=watch) + pod_list=[] + for pod in api_response.items: + status = pod.status.phase + #container_status = pod.status.container_statuses[0] + #if container_status.started is False or container_status.ready is False: + # waiting_state = container_status.state.waiting + # if waiting_state.message is not None and 'Error' in waiting_state.message: + # status = waiting_state.reason + if target != '': + if target in pod.metadata.name: + return (pod.metadata.name + " " + status) + pod_list.append(pod.metadata.name+" "+status) + return pod_list + +def k8s_pod_status_check(k8s_api, waiting_time, namespace,except_pod=False): + num=0 + while True: + num+=1 + resp=k8s_get_pod(k8s_api, namespace) + all_run_flag=True + if debug_mode: + debug_print('-'*30) + debug_print('pod 상태 체크시도 : {} ({}s)'.format(num, waiting_time)) + debug_print('-'*30) + for i in resp: + if except_pod: + if except_pod in 
i.lower(): continue + if 'pending' in i.lower(): + all_run_flag=False + result='{} 결과: {}'.format(i, all_run_flag) + debug_print(result) + if all_run_flag: + if debug_mode: + debug_print('-'*30) + debug_print('[{}] pod All Running'.format(namespace)) + debug_print('-'*30) + for i in resp: debug_print(i) + break + else: time.sleep(int(waiting_time)) + +def main(): + namespace = os.sys.argv[1] + + try: + Except_k8s_pod = os.sys.argv[2] + except: + Except_k8s_pod = '' + + try: + KUBE_CONFIG_PATH = os.sys.argv[3] + os.environ["KUBECONFIG"]=KUBE_CONFIG_PATH + except: + KUBE_CONFIG_PATH = os.environ["KUBECONFIG"] + + k8s_api=k8s_conn(KUBE_CONFIG_PATH) + k8s_pod_status_check(k8s_api, 60, namespace, Except_k8s_pod) + + +if __name__ == "__main__": + try: + debug_mode=False + main() + except Exception as err: + print("[Usage] k8s_status {namespace} {Except_pod=(default=false)} {KUBECONFIG_PATH=(default=current env)}") + print(err) diff --git a/ansible/01_old/roles/cmoa_install/files/postgres_check_data b/ansible/01_old/roles/cmoa_install/files/postgres_check_data new file mode 100755 index 0000000..d377aeb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/postgres_check_data @@ -0,0 +1,6 @@ +#!/bin/bash + +namespace=$1 +pg_pod=`kubectl -n ${namespace} get pod --no-headers | awk '{print $1}' | grep postgres` +kubectl_cmd="kubectl -n ${namespace} exec -it ${pg_pod} --" +${kubectl_cmd} bash -c "echo \"select count(*) from pg_database where datname='keycloak';\" | /usr/bin/psql -U postgres | egrep -iv '(count|---|row)' | tr -d ' ' | tr -d '\n'" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/files/rel_change b/ansible/01_old/roles/cmoa_install/files/rel_change new file mode 100755 index 0000000..ae1f6b3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/files/rel_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_version=$1 +after_version=$2 +grep_path=$3 + +if [[ $before_version 
== '' || $after_version == '' ]]; then + echo '[Usage] $0 {before_version} {after_version}' + exit +fi + +grep -rn ${before_version} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_version}/${after_version}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/tasks/00-default-settings-master.yml b/ansible/01_old/roles/cmoa_install/tasks/00-default-settings-master.yml new file mode 100644 index 0000000..4a17c4a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/00-default-settings-master.yml @@ -0,0 +1,30 @@ +--- +- name: 1. Create a cmoa namespace + kubernetes.core.k8s: + name: "{{ cmoa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: 2. Create secret + kubernetes.core.k8s: + state: present + namespace: "{{ item }}" + src: "{{ role_path }}/files/00-default/secret_nexus.yaml" + apply: yes + with_items: + - "{{ cmoa_namespace }}" + - default + +- name: 3. kubeconfig check + shell: "echo $KUBECONFIG" + register: kubeconfig + +- name: 4. Patch default sa + shell: "{{ role_path }}/files/00-default/sa_patch.sh {{ kubeconfig.stdout }}" + +- name: 5. Master IP Setting + command: "{{ role_path }}/files/ip_change {{ before_ip }} {{ ansible_default_ipv4.address }} {{ role_path }}/files" + +- name: 6. CloudMOA Version Change + command: "{{ role_path }}/files/rel_change {{ before_version }} {{ cmoa_version }} {{ role_path }}/files" diff --git a/ansible/01_old/roles/cmoa_install/tasks/00-default-settings-node.yml b/ansible/01_old/roles/cmoa_install/tasks/00-default-settings-node.yml new file mode 100644 index 0000000..a568b74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/00-default-settings-node.yml @@ -0,0 +1,27 @@ +--- +- name: 1. 
Node add Label (worker1) + kubernetes.core.k8s: + apply: yes + definition: + apiVersion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker1 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker1 + +- name: 2. Node add Label (worker2) + kubernetes.core.k8s: + definition: + apiVersion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker2 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker2 \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/tasks/01-storage-install.yml b/ansible/01_old/roles/cmoa_install/tasks/01-storage-install.yml new file mode 100644 index 0000000..bef58ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/01-storage-install.yml @@ -0,0 +1,45 @@ +--- +- name: 1. yaml file install (sc, pv) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/01-storage/{{ item }}" + apply: yes + with_items: + - 00-storageclass.yaml + - 01-persistentvolume.yaml + +- name: 2. helmchart install (minio) + kubernetes.core.helm: + name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/01-storage/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/01-storage/{{item}}/values.yaml" + with_items: + - minio + +- name: 3. Change a Minio Api Service (NodePort=minio_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ minio_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ minio_service_port }}" + nodePort: "{{ minio_nodePort }}" + apply: yes + +- name: 4. Check Kubernetes Pods (minio) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 5. 
minio setting (minio) + command: "{{ role_path }}/files/01-storage/cmoa_minio {{ ansible_default_ipv4.address }}:{{ minio_nodePort }} {{ minio_user }} {{ bucket_name }} {{ days }} {{ rule_id }}" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/tasks/02-base-install.yml b/ansible/01_old/roles/cmoa_install/tasks/02-base-install.yml new file mode 100644 index 0000000..f7924a6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/02-base-install.yml @@ -0,0 +1,51 @@ +--- +- name: 1. kafka broker config apply (base) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 00-kafka-broker-config.yaml + +- name: 2. coredns config apply (base) + kubernetes.core.k8s: + state: present + namespace: default + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 01-coredns.yaml + +- name: 3. helmchart install (base) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/02-base/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/02-base/{{item}}/values.yaml" + with_items: + - base + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/cmoa_install/tasks/03-ddl-dml.yml b/ansible/01_old/roles/cmoa_install/tasks/03-ddl-dml.yml new file mode 100644 index 0000000..be5af75 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/03-ddl-dml.yml @@ -0,0 +1,59 @@ +- name: 1. Check Postgres DB Data + command: "{{ role_path }}/files/postgres_check_data {{ cmoa_namespace }}" + register: pg_check_result + +- name: 2. Insert Elasticsearch template + command: "sh {{ role_path }}/files/03-ddl-dml/elasticsearch/es-ddl-put.sh {{ cmoa_namespace }}" + +- name: 2.1. Elasticsearch dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy alertmanager base-cortex-configs base-cortex-distributor base-cortex-ruler" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + +- name: 2.2. Check Kubernetes Pods (Elasticsearch dependency) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 3. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" + register: pod_list + when: pg_check_result.stdout != '1' + +- name: 4. Copy psql file in postgres (DDL) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_ddl.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" + when: item is match('postgres') and pg_check_result.stdout != '1' + with_items: "{{ pod_list.stdout_lines }}" + ignore_errors: true + +- name: 5. 
Execute a command in postgres (DDL) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 6. Copy psql file in postgres (DML) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_dml.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 7. Execute a command in postgres (DML) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install/tasks/04-keycloak-install.yml b/ansible/01_old/roles/cmoa_install/tasks/04-keycloak-install.yml new file mode 100644 index 0000000..de5fc9c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/04-keycloak-install.yml @@ -0,0 +1,34 @@ +--- +- name: 1. helmchart install (keycloak) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/04-keycloak" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/04-keycloak/values.yaml" + with_items: + - keycloak + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/cmoa_install/tasks/05-imxc-install.yml b/ansible/01_old/roles/cmoa_install/tasks/05-imxc-install.yml new file mode 100644 index 0000000..420d2d1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/05-imxc-install.yml @@ -0,0 +1,16 @@ +--- +- name: 1. helmchart install (imxc) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/05-imxc" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/05-imxc/values.yaml" + with_items: + - imxc + +- name: 2. Check Kubernetes Pods (imxc / keycloak) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/cmoa_install/tasks/06-imxc-ui-install.yml b/ansible/01_old/roles/cmoa_install/tasks/06-imxc-ui-install.yml new file mode 100644 index 0000000..7da82a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/06-imxc-ui-install.yml @@ -0,0 +1,112 @@ +--- +- name: 1. helmchart install (imxc-ui-all) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + - imxc-ui-jspd + when: imxc_ui == 'all' + +- name: 1. 
helmchart install (imxc-ui-jaeger) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + when: imxc_ui == 'jaeger' + +- name: 2. Change a imxc-ui Service (imxc-ui-jaeger) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ jaeger_servicename }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ jaeger_service_port }}" + nodePort: "{{ jaeger_nodePort }}" + apply: yes + when: imxc_ui == 'jaeger' + +- name: 2. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" # Output is a column + register: pod_list + when: imxc_ui != 'all' + +- name: 3. Copy psql file in psql (imxc-jaeger) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jaeger_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 4. Execute a command in psql (imxc-jaeger) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 1. 
helmchart install (imxc-ui-jspd) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jspd + when: imxc_ui == 'jspd' + ignore_errors: true + +- name: 3. Copy psql file in postgres (imxc-ui-jspd) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jspd_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 4. Execute a command in postgres (imxc-ui-jspd) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 2. Check Kubernetes Pods (imxc ui) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/cmoa_install/tasks/07-keycloak-setting.yml b/ansible/01_old/roles/cmoa_install/tasks/07-keycloak-setting.yml new file mode 100644 index 0000000..8e90b79 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/07-keycloak-setting.yml @@ -0,0 +1,90 @@ +--- +- name: 0. 
Generate keycloak auth token + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/realms/master/protocol/openid-connect/token" + method: POST + body: "client_id={{ keycloak_auth_client }}&username={{ keycloak_admin_user }}&password={{ keycloak_admin_password }}&grant_type=password" + validate_certs: no + register: keycloak_auth_response + until: keycloak_auth_response.status == 200 + retries: 5 + delay: 2 + +- name: 1. Determine if realm exists + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/admin/realms/{{ keycloak_realm }}" + method: GET + status_code: + - 200 + - 404 + headers: + Accept: "application/json" + Authorization: "Bearer {{ keycloak_auth_response.json.access_token }}" + register: keycloak_realm_exists + + +- name: 2. update a keycloak realm + community.general.keycloak_realm: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + login_theme: "{{ keycloak_login_theme }}" + loop: "{{ keycloak_clients | flatten }}" + +- name: 3. Validate Keycloak clients + ansible.builtin.assert: + that: + - item.name is defined and item.name | length > 0 + - (item.client_id is defined and item.client_id | length > 0) or (item.id is defined and item.id | length > 0) + fail_msg: "For each keycloak client, attributes `name` and either `id` or `client_id` is required" + quiet: True + loop: "{{ keycloak_clients | flatten }}" + loop_control: + label: "{{ item.name | default('unnamed client') }}" + + +- name: 4. 
update a Keycloak client + community.general.keycloak_client: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + default_roles: "{{ item.roles | default(omit) }}" + client_id: "{{ item.client_id | default(omit) }}" + id: "{{ item.id | default(omit) }}" + name: "{{ item.name | default(omit) }}" + description: "{{ item.description | default(omit) }}" + root_url: "{{ item.root_url | default('') }}" + admin_url: "{{ item.admin_url | default('') }}" + base_url: "{{ item.base_url | default('') }}" + enabled: "{{ item.enabled | default(True) }}" + redirect_uris: "{{ item.redirect_uris | default(omit) }}" + web_origins: "{{ item.web_origins | default('+') }}" + bearer_only: "{{ item.bearer_only | default(omit) }}" + standard_flow_enabled: "{{ item.standard_flow_enabled | default(omit) }}" + implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(omit) }}" + direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(omit) }}" + service_accounts_enabled: "{{ item.service_accounts_enabled | default(omit) }}" + public_client: "{{ item.public_client | default(False) }}" + protocol: "{{ item.protocol | default(omit) }}" + state: present + register: create_client_result + loop: "{{ keycloak_clients | flatten }}" + when: (item.name is defined and item.client_id is defined) or (item.name is defined and item.id is defined) + +- name: 5. Dependency deploy scale down + command: "kubectl -n {{ cmoa_namespace }} scale --replicas=0 deploy imxc-api noti-server auth-server zuul-deployment" + +- name: 6. 
Dependency deploy scale up + command: "kubectl -n {{ cmoa_namespace }} scale --replicas=1 deploy imxc-api noti-server auth-server zuul-deployment" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + + diff --git a/ansible/01_old/roles/cmoa_install/tasks/08-finish.yml b/ansible/01_old/roles/cmoa_install/tasks/08-finish.yml new file mode 100644 index 0000000..4fd19f4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/08-finish.yml @@ -0,0 +1,17 @@ +--- +- name: 0. Check Kubernetes Pods (ALL) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 1. IP Setting reset + command: "{{ role_path }}/files/ip_change {{ansible_default_ipv4.address}} {{before_ip}} {{ role_path }}/files" + +- name: 2. CloudMOA Version reset + command: "{{ role_path }}/files/rel_change {{ cmoa_version }} {{ before_version }} {{ role_path }}/files" + +- debug: + msg: + - ======================================================================================= + - "## CloudMOA WEB " + - CloudMOA Jaeger = http://{{ ansible_default_ipv4.address }}:31080 + - CloudMOA JSPD = http://{{ ansible_default_ipv4.address }}:31084 + - ======================================================================================= diff --git a/ansible/01_old/roles/cmoa_install/tasks/helm-install.yml b/ansible/01_old/roles/cmoa_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: 
Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/01_old/roles/cmoa_install/tasks/main.yml b/ansible/01_old/roles/cmoa_install/tasks/main.yml new file mode 100644 index 0000000..7239fa3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- include: helm-install.yml + tags: helm-install + +- include: 00-default-settings-master.yml + tags: default_setting + when: kubernetes_role == 'master' + +- include: 00-default-settings-node.yml + tags: default_setting_node + when: kubernetes_role == 'node' + +- include: 01-storage-install.yml + tags: storage-install + when: kubernetes_role == 'master' + +- include: 02-base-install.yml + tags: base-install + when: kubernetes_role == 'master' + +- include: 03-ddl-dml.yml + tags: ddl-dml + when: kubernetes_role == 'master' + +- include: 04-keycloak-install.yml + tags: keycloak-install + when: kubernetes_role == 'master' + +- include: 05-imxc-install.yml + tags: imxc-install + when: kubernetes_role == 'master' + +- include: 06-imxc-ui-install.yml + tags: imxc-ui-install + when: kubernetes_role == 'master' + +- include: 07-keycloak-setting.yml + tags: keycloak-setting + when: kubernetes_role == 'master' + +- include: 08-finish.yml + tags: finish + when: kubernetes_role == 'master' \ No newline at end of file diff --git 
a/ansible/01_old/roles/cmoa_install/templates/realm.json.j2 b/ansible/01_old/roles/cmoa_install/templates/realm.json.j2 new file mode 100644 index 0000000..1323ce2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/templates/realm.json.j2 @@ -0,0 +1,7 @@ +{ + "id": "{{ keycloak_realm }}", + "realm": "{{ keycloak_realm }}", + "enabled": true, + "eventsEnabled": true, + "eventsExpiration": 7200 +} diff --git a/ansible/01_old/roles/cmoa_install/vars/main.yml b/ansible/01_old/roles/cmoa_install/vars/main.yml new file mode 100644 index 0000000..14c8e95 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install/vars/main.yml @@ -0,0 +1,7 @@ +--- +# name of the realm to create, this is a required variable +keycloak_realm: Exem + +# other settings +keycloak_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_http_port }}" +keycloak_management_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_management_http_port }}" diff --git a/ansible/01_old/roles/cmoa_install_bak/defaults/main.yml b/ansible/01_old/roles/cmoa_install_bak/defaults/main.yml new file mode 100644 index 0000000..11b9651 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/defaults/main.yml @@ -0,0 +1,64 @@ +# helm file install +helm_checksum: sha256:950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 +helm_version: v3.10.3 + +# cmoa info +cmoa_namespace: imxc +cmoa_version: rel3.4.8 + +# default ip/version (not change) +before_ip: 111.111.111.111 +before_version: rel0.0.0 + +# files/00-default in role +docker_secret_file: secret_nexus.yaml + +# all, jaeger, jspd +imxc_ui: all + +# [docker_config_path] +docker_config_nexus: dockerconfig/docker_config_nexus.json + +# [jaeger] +jaeger_servicename: imxc-ui-service-jaeger +jaeger_service_port: 80 +jaeger_nodePort: 31080 # only imxc-ui-jaeger option (imxc-ui-jaeger template default port=31084) + +# [minio] +minio_service_name: minio +minio_service_port: 9000 +minio_nodePort: 32002 +minio_user: cloudmoa +minio_pass: admin1234 
+bucket_name: cortex-bucket +days: 42 +rule_id: cloudmoa + +# [Elasticsearch] +elasticsearch_service_name: elasticsearch +elasticsearch_service_port: 9200 +elasticsearch_nodePort: 30200 + +# [Keycloak] +# Keycloak configuration settings +keycloak_http_port: 31082 +keycloak_https_port: 8443 +keycloak_management_http_port: 31990 +keycloak_realm: exem + +# Keycloak administration console user +keycloak_admin_user: admin +keycloak_admin_password: admin +keycloak_auth_realm: master +keycloak_auth_client: admin-cli +keycloak_context: /auth + +# keycloak_clients +keycloak_clients: + - name: 'authorization_server' + client_id: authorization_server + realm: exem + redirect_uris: "http://10.10.30.75:31080/*,http://10.10.30.75:31084/*,http://localhost:8080/*,http://localhost:8081/*" + public_client: True + + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/00-default/sa_patch.sh b/ansible/01_old/roles/cmoa_install_bak/files/00-default/sa_patch.sh new file mode 100755 index 0000000..85cdf09 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/00-default/sa_patch.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +export KUBECONFIG=$1 + +#kubectl -n imxc patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' +kubectl -n default patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' diff --git a/ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_dockerhub.yaml b/ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_dockerhub.yaml new file mode 100644 index 0000000..268027b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_dockerhub.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: regcred +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CiAgICAgICJhdXRoIjogIlpYaGxiV1JsZGpJNk0yWXlObVV6T0RjdFlqY3paQzAwTkRVMUxUazNaRFV0T1dWaU9EWmtObVl4WXpOayIKICAgIH0KICB9Cn0KCg== +type: kubernetes.io/dockerconfigjson diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_nexus.yaml b/ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_nexus.yaml new file mode 100644 index 0000000..6a2543f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/00-default/secret_nexus.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICIxMC4xMC4zMS4yNDM6NTAwMCI6IHsKICAgICAgImF1dGgiOiAiWTI5eVpUcGpiM0psWVdSdGFXNHhNak0wIgogICAgfQogIH0KfQoK +kind: Secret +metadata: + name: regcred +type: kubernetes.io/dockerconfigjson + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/00-storageclass.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/00-storageclass.yaml new file mode 100644 index 0000000..8f41292 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/00-storageclass.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exem-local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/01-persistentvolume.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/01-persistentvolume.yaml new file mode 100644 index 0000000..1bd4546 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/01-persistentvolume.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-0 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-1 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: 
Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv2 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-2 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv3 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-3 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv4 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/cmoa_minio b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/cmoa_minio new file mode 100755 index 0000000..522b87d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/cmoa_minio @@ -0,0 +1,63 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, time, urllib3 +from minio import Minio +from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition +from minio.commonconfig import ENABLED, Filter + +def minio_conn(ipaddr, portnum, ac_key, sec_key): + conn='{}:{}'.format(ipaddr,portnum) + url='http://{}'.format(conn) + print(url) + minio_client = Minio( + conn, access_key=ac_key, secret_key=sec_key, secure=False, + http_client=urllib3.ProxyManager( + url, timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + retries=urllib3.Retry( + total=5, backoff_factor=0.2, + status_forcelist=[ + 500, 502, 503, 504 + ], + ), + ), + ) + + return minio_client + +def minio_create_buckets(minio_client, bucket_name, days, rule_id="cloudmoa"): + config = LifecycleConfig( + [ + Rule( + ENABLED, + rule_filter=Filter(prefix=""), + rule_id=rule_id, + expiration=Expiration(days=days), + ), + ], + ) + minio_client.set_bucket_lifecycle(bucket_name, config) + +def minio_delete_bucket(client, bucket_name): + client.delete_bucket_lifecycle(bucket_name) + +def main(): + s3_url = os.sys.argv[1].split(':')[0] + s3_url_port = os.sys.argv[1].split(':')[1] + minio_user = os.sys.argv[2] + minio_pass = os.sys.argv[3] + bucket_name = os.sys.argv[4] + minio_days = os.sys.argv[5] + rule_id = os.sys.argv[6] + + print(s3_url, s3_url_port, minio_user, minio_pass) + + minio_client=minio_conn(s3_url, s3_url_port, minio_user, minio_pass) + minio_create_buckets(minio_client, bucket_name, minio_days, rule_id) + +if __name__ == "__main__": + try: + main() + except Exception as err: + print("[Usage] minio {url:port} {username} {password} {bucketName} {days} {ruleId}") + print(err) \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/.helmignore new file mode 100644 index 0000000..a9fe727 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/Chart.yaml new file mode 100644 index 0000000..fc21076 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +description: Multi-Cloud Object Storage +name: minio +version: 4.0.2 +appVersion: RELEASE.2022-05-08T23-50-31Z +keywords: + - minio + - storage + - object-storage + - s3 + - cluster +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +sources: +- https://github.com/minio/minio +maintainers: +- name: MinIO, Inc + email: dev@minio.io diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/README.md b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/README.md new file mode 100644 index 0000000..ad3eb7d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/README.md @@ -0,0 +1,235 @@ +# MinIO Helm Chart + +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) + +MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. 
Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. + +For more detailed documentation please visit [here](https://docs.minio.io/) + +## Introduction + +This chart bootstraps MinIO Cluster on [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Helm cli with Kubernetes cluster configured. +- PV provisioner support in the underlying infrastructure. (We recommend using ) +- Use Kubernetes version v1.19 and later for best experience. + +## Configure MinIO Helm repo + +```bash +helm repo add minio https://charts.min.io/ +``` + +### Installing the Chart + +Install this chart using: + +```bash +helm install --namespace minio --set rootUser=rootuser,rootPassword=rootpass123 --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Upgrading the Chart + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +### Configuration + +Refer the [Values file](./values.yaml) for all the possible config fields. + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. 
+ +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. For example, + +```bash +helm install --name my-release -f values.yaml minio/minio +``` + +### Persistence + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +### Existing PersistentVolumeClaim + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +### NetworkPolicy + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for *all* pods in the namespace: + +``` +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. 
+ +### Existing secret + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. + +First, create the secret: + +```bash +kubectl create secret generic my-minio-secret --from-literal=rootUser=foobarbaz --from-literal=rootPassword=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: + +```bash +helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data.\ in Secret | Corresponding variable | Description | Required | +|:------------------------|:-----------------------|:---------------|:---------| +| `rootUser` | `rootUser` | Root user. | yes | +| `rootPassword` | `rootPassword` | Root password. | yes | + +All corresponding variables will be ignored in values file. + +### Configure TLS + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). Then create a secret using + +```bash +kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. 
If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include MinIO's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for MinIO's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +### Create buckets after install + +Install the chart, specifying the buckets you want to create after install: + +```bash +helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + +### Create policies after install +Install the chart, specifying the policies you want to create after install: + +```bash +helm install --set policies[0].name=mypolicy,policies[0].statements[0].resources[0]='arn:aws:s3:::bucket1',policies[0].statements[0].actions[0]='s3:ListBucket',policies[0].statements[0].actions[1]='s3:GetObject' minio/minio +``` + +Description of the configuration parameters used above - + +- `policies[].name` - name of the policy to create, must be a string with length > 0 +- `policies[].statements[]` - list of statements, includes actions and resources +- 
`policies[].statements[].resources[]` - list of resources that applies the statement +- `policies[].statements[].actions[]` - list of actions granted + +### Create user after install + +Install the chart, specifying the users you want to create after install: + +```bash +helm install --set users[0].accessKey=accessKey,users[0].secretKey=secretKey,users[0].policy=none,users[1].accessKey=accessKey2,users[1].secretRef=existingSecret,users[1].secretKey=password,users[1].policy=none minio/minio +``` + +Description of the configuration parameters used above - + +- `users[].accessKey` - accessKey of user +- `users[].secretKey` - secretKey of usersecretRef +- `users[].existingSecret` - secret name that contains the secretKey of user +- `users[].existingSecretKey` - data key in existingSecret secret containing the secretKey +- `users[].policy` - name of the policy to assign to user + +## Uninstalling the Chart + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +helm delete my-release +``` + +or + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/NOTES.txt new file mode 100644 index 0000000..9337196 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/NOTES.txt @@ -0,0 +1,43 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access MinIO from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. 
kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . }}-local + +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +MinIO can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . 
}} + +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_bucket.txt b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..35a48fc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,109 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? 
; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.buckets }} +{{ $global := . 
}} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ tpl .name $global }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_policy.txt b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_policy.txt new file mode 100644 index 0000000..d565b16 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_policy.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkPolicyExists ($policy) +# Check if the policy exists, by using the exit code of `mc admin policy info` +checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? 
+} + +# createPolicy($name, $filename) +createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.policies }} +# Create the policies +{{- range $idx, $policy := .Values.policies }} +createPolicy {{ $policy.name }} policy_{{ $idx }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_user.txt b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_user.txt new file mode 100644 index 0000000..7771428 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_create_user.txt @@ -0,0 +1,88 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? 
; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkUserExists ($username) +# Check if the user exists, by using the exit code of `mc admin user info` +checkUserExists() { + USER=$1 + CMD=$(${MC} admin user info myminio $USER > /dev/null 2>&1) + return $? +} + +# createUser ($username, $password, $policy) +createUser() { + USER=$1 + PASS=$2 + POLICY=$3 + + # Create the user if it does not exist + if ! checkUserExists $USER ; then + echo "Creating user '$USER'" + ${MC} admin user add myminio $USER $PASS + else + echo "User '$USER' already exists." + fi + + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.users }} +{{ $global := . 
}} +# Create the users +{{- range .Values.users }} +{{- if .existingSecret }} +createUser {{ tpl .accessKey $global }} $(cat /config/secrets/{{ tpl .accessKey $global }}) {{ .policy }} +{{ else }} +createUser {{ tpl .accessKey $global }} {{ .secretKey }} {{ .policy }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_custom_command.txt b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_custom_command.txt new file mode 100644 index 0000000..b583a77 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_custom_command.txt @@ -0,0 +1,58 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# runCommand ($@) +# Run custom mc command +runCommand() { + ${MC} "$@" + return $? 
+} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.customCommands }} +# Run custom commands +{{- range .Values.customCommands }} +runCommand {{ .command }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_policy.tpl b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_policy.tpl new file mode 100644 index 0000000..83a2e15 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helper_policy.tpl @@ -0,0 +1,18 @@ +{{- $statements_length := len .statements -}} +{{- $statements_length := sub $statements_length 1 -}} +{ + "Version": "2012-10-17", + "Statement": [ +{{- range $i, $statement := .statements }} + { + "Effect": "Allow", + "Action": [ +"{{ $statement.actions | join "\",\n\"" }}" + ]{{ if $statement.resources }}, + "Resource": [ +"{{ $statement.resources | join "\",\n\"" }}" + ]{{ end }} + }{{ if lt $i $statements_length }},{{end }} +{{- end }} + ] +} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helpers.tpl new file mode 100644 index 0000000..4e38194 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/_helpers.tpl @@ -0,0 +1,218 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare ">=1.7-0, <1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for console ingress. +*/}} +{{- define "minio.consoleIngress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to MinIO binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "minio.getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "minio.root.username" -}} + {{- if .Values.rootUser }} + {{- .Values.rootUser | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 20 "Key" "rootUser") }} + {{- end }} +{{- end -}} + +{{- define "minio.root.password" -}} + {{- if .Values.rootPassword }} + {{- .Values.rootPassword | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 40 "Key" "rootPassword") }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/configmap.yaml new file mode 100644 index 0000000..95a7c60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + add-user: |- +{{ include (print $.Template.BasePath "/_helper_create_user.txt") . | indent 4 }} + add-policy: |- +{{ include (print $.Template.BasePath "/_helper_create_policy.txt") . 
| indent 4 }} +{{- range $idx, $policy := .Values.policies }} + # {{ $policy.name }} + policy_{{ $idx }}.json: |- +{{ include (print $.Template.BasePath "/_helper_policy.tpl") . | indent 4 }} +{{ end }} + custom-command: |- +{{ include (print $.Template.BasePath "/_helper_custom_command.txt") . | indent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-ingress.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-ingress.yaml new file mode 100644 index 0000000..2ce9a93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.consoleIngress.enabled -}} +{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}} +{{- $servicePort := .Values.consoleService.port -}} +{{- $ingressPath := .Values.consoleIngress.path -}} +apiVersion: {{ template "minio.consoleIngress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.consoleIngress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.consoleIngress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.consoleIngress.ingressClassName }} + ingressClassName: {{ .Values.consoleIngress.ingressClassName }} +{{- end }} +{{- if .Values.consoleIngress.tls }} + tls: + {{- range .Values.consoleIngress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.consoleIngress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-service.yaml new file mode 100644 index 0000000..f4b1294 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/console-service.yaml @@ -0,0 +1,48 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-console + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.consoleService.annotations }} + annotations: +{{ toYaml .Values.consoleService.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} + type: ClusterIP + {{- if not (empty .Values.consoleService.clusterIP) }} + clusterIP: {{ .Values.consoleService.clusterIP }} + {{end}} +{{- else if eq .Values.consoleService.type "LoadBalancer" }} + type: {{ .Values.consoleService.type }} + loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} +{{- else }} + type: {{ .Values.consoleService.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.consoleService.port }} + protocol: TCP +{{- if (and (eq .Values.consoleService.type "NodePort") ( .Values.consoleService.nodePort)) }} + nodePort: {{ .Values.consoleService.nodePort }} +{{- else }} + targetPort: {{ .Values.consoleService.port }} +{{- end}} +{{- if .Values.consoleService.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.consoleService.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/deployment.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/deployment.yaml new file mode 100644 index 0000000..a06bc35 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/deployment.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: 1 + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }}" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/gateway-deployment.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/gateway-deployment.yaml new file mode 100644 index 0000000..b14f86b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/gateway-deployment.yaml @@ -0,0 +1,173 @@ +{{- if eq .Values.mode "gateway" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + {{- if eq .Values.gateway.type "nas" }} + - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }} " + {{- end }} + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client.crt" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client.key" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/ingress.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/ingress.yaml new file mode 100644 index 0000000..8d9a837 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..68a2599 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + - port: {{ .Values.consoleService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..8037eb7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: minio + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" . 
}} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-bucket-job.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..434b31d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeBucketJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-policy-job.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-policy-job.yaml new file mode 100644 index 0000000..ae78769 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-policy-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.policies }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-policies-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-policies-job + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makePolicyJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.podAnnotations }} + annotations: +{{ toYaml .Values.makePolicyJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makePolicyJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makePolicyJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-policy"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makePolicyJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-user-job.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-user-job.yaml new file mode 100644 index 0000000..d3750e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-create-user-job.yaml @@ -0,0 +1,97 @@ +{{- $global := . -}} +{{- if .Values.users }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-user-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-user-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeUserJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeUserJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeUserJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeUserJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- range .Values.users }} + {{- if .existingSecret }} + - secret: + name: {{ tpl .existingSecret $global }} + items: + - key: {{ .existingSecretKey }} + path: secrets/{{ tpl .accessKey $global }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeUserJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-custom-command.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-custom-command.yaml new file mode 100644 index 0000000..7e83faf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/post-install-custom-command.yaml @@ -0,0 +1,87 @@ +{{- if .Values.customCommands }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-custom-command-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-custom-command-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.customCommandJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.podAnnotations }} + annotations: +{{ toYaml .Values.customCommandJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.customCommandJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.customCommandJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/custom-command"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.customCommandJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/pvc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/pvc.yaml new file mode 100644 index 0000000..369aade --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/pvc.yaml @@ -0,0 +1,35 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/secrets.yaml new file mode 100644 index 0000000..da2ecab --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/secrets.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + rootUser: {{ include "minio.root.username" . | b64enc | quote }} + rootPassword: {{ include "minio.root.password" . 
| b64enc | quote }} + {{- if .Values.etcd.clientCert }} + etcd_client.crt: {{ .Values.etcd.clientCert | toString | b64enc | quote }} + {{- end }} + {{- if .Values.etcd.clientCertKey }} + etcd_client.key: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/securitycontextconstraints.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 0000000..4bac7e3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/service.yaml new file mode 100644 index 0000000..64aa990 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/service.yaml @@ -0,0 +1,49 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + monitoring: "true" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..6a4bd94 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..809848f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{ else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- if .Values.tls.enabled }} + - port: https + scheme: https + {{ else }} + - port: http + scheme: http + {{- end }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + {{- if not .Values.metrics.serviceMonitor.public }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-prometheus + key: token + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} + monitoring: "true" +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4160f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/templates/statefulset.yaml @@ -0,0 +1,217 @@ +{{- if eq .Values.mode "distributed" }} +{{ $poolCount := .Values.pools | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $poolCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/values.yaml new file mode 100644 index 0000000..a957f7f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/01-storage/minio/values.yaml @@ -0,0 +1,461 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: 10.10.31.243:5000/cmoa3/minio + tag: RELEASE.2022-05-08T23-50-31Z + pullPolicy: IfNotPresent + +imagePullSecrets: + - name: "regcred" +# - name: "image-pull-secret" + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). 
+## +mcImage: + repository: 10.10.31.243:5000/cmoa3/mc + tag: RELEASE.2022-05-09T04-08-26Z + pullPolicy: IfNotPresent + +## minio mode, i.e. standalone or distributed or gateway. +mode: distributed ## other supported values are "standalone", "gateway" + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Typically the deployment/statefulset includes checksums of secrets/config, +## So that when these change on a subsequent helm install, the deployment/statefulset +## is restarted. This can result in unnecessary restarts under GitOps tooling such as +## flux, so set to "true" to disable this behaviour. +ignoreChartChecksums: false + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Port number for MinIO S3 API Access +minioAPIPort: "9000" + +## Port number for MinIO Browser COnsole Access +minioConsolePort: "9001" + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default rootUser, rootPassword +## AccessKey and secretKey is generated when not set +## Distributed MinIO ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +rootUser: "admin" +rootPassword: "passW0rd" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | rootUser | rootUser | +## | rootPassword | rootPassword | +## +## All mentioned variables will be ignored in values file. +## .data.rootUser and .data.rootPassword are mandatory, +## others depend on enabled status of corresponding sections. 
+existingSecret: "" + +## Directory on the MinIO pof +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" + +## Path where PV would be mounted on the MinIO Pod +mountPath: "/export" +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +## +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 2 +# Number of MinIO containers running +#replicas: 16 +replicas: 2 +# Number of expanded MinIO clusters +pools: 1 + +# Deploy if 'mode == gateway' - 4 replicas. +gateway: + type: "nas" # currently only "nas" are supported. + replicas: 4 + +## TLS Settings for MinIO +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for MinIO. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. 
+trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: {} + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "exem-local-storage" + VolumeName: "" + accessMode: ReadWriteOnce + size: 50Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +#service: +# type: NodePort +# clusterIP: ~ + ## Make sure to match it to minioAPIPort +# port: "9000" +# nodePort: "32002" + +service: + type: ClusterIP + clusterIP: ~ + ## Make sure to match it to minioAPIPort + port: "9000" + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +ingress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +consoleService: + type: NodePort + clusterIP: ~ + ## Make sure to match it to minioConsolePort + port: "9001" + nodePort: "32001" + +consoleIngress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - console.minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if 
enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" + +# Additational pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + #memory: 16Gi + memory: 1Gi + cpu: 200m + +## List of policies to be created after minio install +## +## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] +## you can define additional policies with custom supported actions and resources +policies: [] +## writeexamplepolicy policy grants creation or deletion of buckets with name +## starting with example. In addition, grants objects write permissions on buckets starting with +## example. +# - name: writeexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:AbortMultipartUpload" +# - "s3:GetObject" +# - "s3:DeleteObject" +# - "s3:PutObject" +# - "s3:ListMultipartUploadParts" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:CreateBucket" +# - "s3:DeleteBucket" +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## readonlyexamplepolicy policy grants access to buckets with name starting with example. +## In addition, grants objects read permissions on buckets starting with example. 
+# - name: readonlyexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:GetObject" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## Additional Annotations for the Kubernetes Job makePolicyJob +makePolicyJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of users to be created after minio install +## +users: + ## Username, password and policy to be assigned to the user + ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] + ## Add new policies as explained here https://docs.min.io/docs/minio-multi-user-quickstart-guide.html + ## NOTE: this will fail if LDAP is enabled in your MinIO deployment + ## make sure to disable this if you are using LDAP. 
+ - accessKey: cloudmoa + secretKey: admin1234 + policy: consoleAdmin + # Or you can refer to specific secret + #- accessKey: externalSecret + # existingSecret: my-secret + # existingSecretKey: password + # policy: readonly + + +## Additional Annotations for the Kubernetes Job makeUserJob +makeUserJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of buckets to be created after minio install +## +buckets: + - name: cortex-bucket + policy: none + purge: false + versioning: false + + # # Name of the bucket + # - name: bucket1 + # # Policy to be set on the + # # bucket [none|download|upload|public] + # policy: none + # # Purge if bucket exists already + # purge: false + # # set versioning for + # # bucket [true|false] + # versioning: false + # - name: bucket2 + # policy: none + # purge: false + # versioning: true + +## Additional Annotations for the Kubernetes Job makeBucketJob +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of command to run after minio install +## NOTE: the mc command TARGET is always "myminio" +customCommands: + # - command: "admin policy set myminio consoleAdmin group='cn=ops,cn=groups,dc=example,dc=com'" + +## Additional Annotations for the Kubernetes Job customCommandJob +customCommandJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) +## when Chart is deployed +environment: + ## Please refer for comprehensive list https://docs.min.io/minio/baremetal/reference/minio-server/minio-server.html + ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" + ## MINIO_BROWSER: "off" + +## The name of a secret in the same kubernetes namespace which contain secret values +## This can be useful for LDAP password, etc +## The key in the secret must be 'config.env' +## +# extraSecret: minio-extraenv + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. + name: "minio-sa" + +metrics: + serviceMonitor: + enabled: false + public: true + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/00-kafka-broker-config.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/00-kafka-broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/00-kafka-broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? -ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. 
cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# 
Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + 
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the 
two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/01-coredns.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/01-coredns.yaml new file mode 100644 index 0000000..c1cb74b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/01-coredns.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-dns + kubernetes.io/name: coredns + name: coredns + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 
53 + protocol: TCP + targetPort: 53 + - name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: ClusterIP + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/Chart.yaml new file mode 100644 index 0000000..74d1d30 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: base +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/Chart.yaml new file mode 100644 index 0000000..74b9505 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: analysis +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml new file mode 100644 index 0000000..21a9298 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml @@ -0,0 +1,87 @@ +#docker run -d --hostname my-rabbit --name some-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management + +--- +kind: Service +apiVersion: v1 +metadata: + name: metric-analyzer-master + namespace: imxc +spec: +# clusterIP: None # We need a headless service to allow the pods to discover each + ports: # other during autodiscover phase for cluster creation. + - name: http # A ClusterIP will prevent resolving dns requests for other pods + protocol: TCP # under the same service. 
+ port: 15672 + targetPort: 15672 +# nodePort: 30001 + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 +# nodePort: 30002 + selector: + app: metric-analyzer-master +# type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-master + name: metric-analyzer-master + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: metric-analyzer-master + template: + metadata: + labels: + app: metric-analyzer-master + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer:{{ .Values.global.METRIC_ANALYZER_MASTER_VERSION }} + imagePullPolicy: IfNotPresent + name: master +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: POSTGRES_SERVER + value: postgres + - name: POSTGRES_USER + value: admin + - name: POSTGRES_PW + value: eorbahrhkswp + - name: POSTGRES_DB + value: postgresdb + - name: PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: POSTGRES_PORT + value: "5432" + - name: ES_SERVER + value: elasticsearch + - name: ES_PORT + value: "9200" + - name: ES_ID + value: "elastic" + - name: ES_PWD + value: "elastic" + - name: LOG_LEVEL + value: INFO + - name: AI_TYPE + value: BASELINE + - name: BASELINE_SIZE + value: "3" + - name: CHECK_DAY + value: "2" + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml new file mode 100644 index 0000000..7e6eaea --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + labels: + app: metric-analyzer-worker + name: metric-analyzer-worker + namespace: imxc +spec: + replicas: 10 + selector: + matchLabels: + app: metric-analyzer-worker + template: + metadata: + labels: + app: metric-analyzer-worker + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer_worker:{{ .Values.global.METRIC_ANALYZER_WORKER_VERSION }} + imagePullPolicy: IfNotPresent + name: worker +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" +# volumes: +# - hostPath: +# path: /usr/share/zoneinfo/Asia/Seoul +# name: timezone-config + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/values.yaml new file mode 100644 index 0000000..d764210 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/analysis/values.yaml @@ -0,0 +1,68 @@ +# Default values for analysis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/.helmignore new file mode 100644 index 0000000..db3418b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/.helmignore @@ -0,0 +1,29 @@ +# Git +.git/ +.gitignore +.github/ + +# IDE +.project +.idea/ +*.tmproj + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Cortex ignore +docs/ +tools/ +ct.yaml +ci/ +README.md.gotmpl +.prettierignore +CHANGELOG.md +MAINTAINERS.md +LICENSE +Makefile +renovate.json + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.lock b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.lock new file mode 100644 index 0000000..f909218 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.lock @@ -0,0 +1,24 @@ +dependencies: +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +digest: sha256:a6b7c1239f9cabc85dd647798a6f92ae8a9486756ab1e87fc11af2180ab03ee4 +generated: "2021-12-25T19:21:57.666697218Z" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.yaml new file mode 100644 index 0000000..9122fe6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/Chart.yaml @@ -0,0 +1,56 @@ +apiVersion: v2 +appVersion: v1.11.0 +dependencies: +- alias: memcached + condition: memcached.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-read + condition: memcached-index-read.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-write + condition: memcached-index-write.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-frontend + condition: memcached-frontend.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-blocks-index + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + 
version: 5.15.12 +- alias: memcached-blocks + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks-metadata + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +description: Horizontally scalable, highly available, multi-tenant, long term Prometheus. +home: https://cortexmetrics.io/ +icon: https://avatars2.githubusercontent.com/u/43045022?s=200&v=4 +kubeVersion: ^1.19.0-0 +maintainers: +- email: thayward@infoblox.com + name: Tom Hayward + url: https://github.com/kd7lxl +- email: Niclas.Schad@plusserver.com + name: Niclas Schad + url: https://github.com/ShuzZzle +name: cortex +sources: +- https://github.com/cortexproject/cortex-helm-chart +version: 1.2.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/README.md b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/README.md new file mode 100644 index 0000000..9a793d3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/README.md @@ -0,0 +1,754 @@ + + +# cortex + +![Version: 1.2.0](https://img.shields.io/badge/Version-1.2.0-informational?style=flat-square) ![AppVersion: v1.11.0](https://img.shields.io/badge/AppVersion-v1.11.0-informational?style=flat-square) + +Horizontally scalable, highly available, multi-tenant, long term Prometheus. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Tom Hayward | thayward@infoblox.com | https://github.com/kd7lxl | +| Niclas Schad | Niclas.Schad@plusserver.com | https://github.com/ShuzZzle | + +## Documentation + +Checkout our documentation for the cortex-helm-chart [here](https://cortexproject.github.io/cortex-helm-chart/) + +## Dependencies + +### Key-Value store + +Cortex requires a Key-Value (KV) store to store the ring. 
It can use traditional KV stores like [Consul](https://www.consul.io/) or [etcd](https://etcd.io/), but it can also build its own KV store on top of memberlist library using a gossip algorithm. + +The recommended approach is to use the built-in memberlist as a KV store, where supported. + +External KV stores can be installed alongside Cortex using their respective helm charts https://github.com/bitnami/charts/tree/master/bitnami/etcd and https://github.com/helm/charts/tree/master/stable/consul. + +### Storage + +Cortex requires a storage backend to store metrics and indexes. +See [cortex documentation](https://cortexmetrics.io/docs/) for details on storage types and documentation + +## Installation + +[Helm](https://helm.sh) must be installed to use the charts. +Please refer to Helm's [documentation](https://helm.sh/docs/) to get started. + +Once Helm is set up properly, add the repo as follows: + +```bash + helm repo add cortex-helm https://cortexproject.github.io/cortex-helm-chart +``` + +Cortex can now be installed with the following command: + +```bash + helm install cortex --namespace cortex cortex-helm/cortex +``` + +If you have custom options or values you want to override: + +```bash + helm install cortex --namespace cortex -f my-cortex-values.yaml cortex-helm/cortex +``` + +Specific versions of the chart can be installed using the `--version` option, with the default being the latest release. +What versions are available for installation can be listed with the following command: + +```bash + helm search repo cortex-helm +``` + +As part of this chart many different pods and services are installed which all +have varying resource requirements. Please make sure that you have sufficient +resources (CPU/memory) available in your cluster before installing Cortex Helm +chart. 
+ +## Upgrades + +To upgrade Cortex use the following command: + +```bash + helm upgrade cortex -f my-cortex-values.yaml cortex-helm/cortex +``` +Note that it might be necessary to use `--reset-values` since some default values in the values.yaml might have changed or were removed. + +Source code can be found [here](https://cortexmetrics.io/) + +## Requirements + +Kubernetes: `^1.19.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | memcached(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-read(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-write(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-frontend(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-index(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-metadata(memcached) | 5.15.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| alertmanager.​affinity | object | `{}` | | +| alertmanager.​annotations | object | `{}` | | +| alertmanager.​containerSecurityContext.​enabled | bool | `true` | | +| alertmanager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| alertmanager.​enabled | bool | `true` | | +| alertmanager.​env | list | `[]` | Extra env variables to pass to the cortex container | +| alertmanager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log level (debug, info, warn, error) | +| alertmanager.​extraContainers | list | `[]` | Additional containers to be added to the cortex pod. | +| alertmanager.​extraPorts | list | `[]` | Additional ports to the cortex services. Useful to expose extra container ports. 
| +| alertmanager.​extraVolumeMounts | list | `[]` | Extra volume mounts that will be added to the cortex container | +| alertmanager.​extraVolumes | list | `[]` | Additional volumes to the cortex pod. | +| alertmanager.​initContainers | list | `[]` | Init containers to be added to the cortex pod. | +| alertmanager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​nodeSelector | object | `{}` | | +| alertmanager.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Alertmanager data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| alertmanager.​persistentVolume.​annotations | object | `{}` | Alertmanager data Persistent Volume Claim annotations | +| alertmanager.​persistentVolume.​enabled | bool | `true` | If true and alertmanager.statefulSet.enabled is true, Alertmanager will create/use a Persistent Volume Claim If false, use emptyDir | +| alertmanager.​persistentVolume.​size | string | `"2Gi"` | Alertmanager data Persistent Volume size | +| alertmanager.​persistentVolume.​storageClass | string | `nil` | Alertmanager data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| alertmanager.​persistentVolume.​subPath | string | `""` | Subdirectory of Alertmanager data Persistent Volume to mount Useful if the volume's root directory is not empty | +| alertmanager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| alertmanager.​podDisruptionBudget | object | `{"maxUnavailable":1}` | If not set then a PodDisruptionBudget will not be created | +| alertmanager.​podLabels | object | `{}` | Pod Labels | +| alertmanager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​replicas | int | `1` | | +| alertmanager.​resources | object | `{}` | | +| alertmanager.​securityContext | object | `{}` | | +| alertmanager.​service.​annotations | object | `{}` | | +| alertmanager.​service.​labels | object | `{}` | | +| alertmanager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| alertmanager.​serviceMonitor.​additionalLabels | object | `{}` | | +| alertmanager.​serviceMonitor.​enabled | bool | `false` | | +| alertmanager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| alertmanager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| alertmanager.​serviceMonitor.​relabelings | list | `[]` | | +| alertmanager.​sidecar | object | `{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/data","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_alertmanager","labelValue":null,"resources":{},"searchNamespace":null,"skipTlsVerify":false,"watchMethod":null}` | Sidecars 
that collect the configmaps with specified label and stores the included files them into the respective folders | +| alertmanager.​sidecar.​skipTlsVerify | bool | `false` | skipTlsVerify Set to true to skip tls verification for kube api calls | +| alertmanager.​startupProbe.​failureThreshold | int | `10` | | +| alertmanager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. This is useful for using a persistent volume for storing silences between restarts. | +| alertmanager.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| alertmanager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| alertmanager.​strategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​terminationGracePeriodSeconds | int | `60` | | +| alertmanager.​tolerations | list | `[]` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| clusterDomain | string | `"cluster.local"` | Kubernetes cluster DNS domain | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"compactor"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| 
compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| compactor.​annotations | object | `{}` | | +| compactor.​containerSecurityContext.​enabled | bool | `true` | | +| compactor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| compactor.​enabled | bool | `true` | | +| compactor.​env | list | `[]` | | +| compactor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| compactor.​extraContainers | list | `[]` | | +| compactor.​extraPorts | list | `[]` | | +| compactor.​extraVolumeMounts | list | `[]` | | +| compactor.​extraVolumes | list | `[]` | | +| compactor.​initContainers | list | `[]` | | +| compactor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​nodeSelector | object | `{}` | | +| compactor.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | compactor data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| compactor.​persistentVolume.​annotations | object | `{}` | compactor data Persistent Volume Claim annotations | +| compactor.​persistentVolume.​enabled | bool | `true` | If true compactor will create/use a Persistent Volume Claim If false, use emptyDir | +| compactor.​persistentVolume.​size | string | `"2Gi"` | | +| compactor.​persistentVolume.​storageClass | string | `nil` | compactor data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| compactor.​persistentVolume.​subPath | string | `""` | Subdirectory of compactor data Persistent Volume to mount Useful if the volume's root directory is not empty | +| compactor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| compactor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| compactor.​podLabels | object | `{}` | Pod Labels | +| compactor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​replicas | int | `1` | | +| compactor.​resources | object | `{}` | | +| compactor.​securityContext | object | `{}` | | +| compactor.​service.​annotations | object | `{}` | | +| compactor.​service.​labels | object | `{}` | | +| compactor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| compactor.​serviceMonitor.​additionalLabels | object | `{}` | | +| compactor.​serviceMonitor.​enabled | bool | `false` | | +| compactor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| compactor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| compactor.​serviceMonitor.​relabelings | list | `[]` | | +| compactor.​startupProbe.​failureThreshold | int | `60` | | +| compactor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​startupProbe.​initialDelaySeconds | int | `120` | | +| compactor.​startupProbe.​periodSeconds | int | `30` | | +| compactor.​strategy.​type | string | `"RollingUpdate"` | | +| compactor.​terminationGracePeriodSeconds | int | `240` | | +| compactor.​tolerations | list | `[]` | | +| 
config.​alertmanager.​enable_api | bool | `false` | Enable the experimental alertmanager config api. | +| config.​alertmanager.​external_url | string | `"/api/prom/alertmanager"` | | +| config.​alertmanager.​storage | object | `{}` | Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config | +| config.​api.​prometheus_http_prefix | string | `"/prometheus"` | | +| config.​api.​response_compression_enabled | bool | `true` | Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression. | +| config.​auth_enabled | bool | `false` | | +| config.​blocks_storage.​bucket_store.​bucket_index.​enabled | bool | `true` | | +| config.​blocks_storage.​bucket_store.​sync_dir | string | `"/data/tsdb-sync"` | | +| config.​blocks_storage.​tsdb.​dir | string | `"/data/tsdb"` | | +| config.​distributor.​pool.​health_check_ingesters | bool | `true` | | +| config.​distributor.​shard_by_all_labels | bool | `true` | Distribute samples based on all labels, as opposed to solely by user and metric name. | +| config.​frontend.​log_queries_longer_than | string | `"10s"` | | +| config.​ingester.​lifecycler.​final_sleep | string | `"30s"` | Duration to sleep for before exiting, to ensure metrics are scraped. | +| config.​ingester.​lifecycler.​join_after | string | `"10s"` | We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. It can take a while to have the full picture when using gossip | +| config.​ingester.​lifecycler.​num_tokens | int | `512` | | +| config.​ingester.​lifecycler.​observe_period | string | `"10s"` | To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, after putting their own tokens into it. 
This is only useful when using gossip, since multiple ingesters joining at the same time can have conflicting tokens if they don't see each other yet. | +| config.​ingester.​lifecycler.​ring.​kvstore.​store | string | `"memberlist"` | | +| config.​ingester.​lifecycler.​ring.​replication_factor | int | `3` | Ingester replication factor per default is 3 | +| config.​ingester_client.​grpc_client_config.​max_recv_msg_size | int | `10485760` | | +| config.​ingester_client.​grpc_client_config.​max_send_msg_size | int | `10485760` | | +| config.​limits.​enforce_metric_name | bool | `true` | Enforce that every sample has a metric name | +| config.​limits.​max_query_lookback | string | `"0s"` | | +| config.​limits.​reject_old_samples | bool | `true` | | +| config.​limits.​reject_old_samples_max_age | string | `"168h"` | | +| config.​memberlist.​bind_port | int | `7946` | | +| config.​memberlist.​join_members | list | `["{{ include \"cortex.fullname\" $ }}-memberlist"]` | the service name of the memberlist if using memberlist discovery | +| config.​querier.​active_query_tracker_dir | string | `"/data/active-query-tracker"` | | +| config.​querier.​query_ingesters_within | string | `"13h"` | Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. Ingesters by default have no data older than 12 hours, so we can safely set this 13 hours | +| config.​querier.​query_store_after | string | `"12h"` | The time after which a metric should be queried from storage and not just ingesters. | +| config.​querier.​store_gateway_addresses | string | automatic | Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should is set automatically when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring). 
| +| config.​query_range.​align_queries_with_step | bool | `true` | | +| config.​query_range.​cache_results | bool | `true` | | +| config.​query_range.​results_cache.​cache.​memcached.​expiration | string | `"1h"` | | +| config.​query_range.​results_cache.​cache.​memcached_client.​timeout | string | `"1s"` | | +| config.​query_range.​split_queries_by_interval | string | `"24h"` | | +| config.​ruler.​enable_alertmanager_discovery | bool | `false` | | +| config.​ruler.​enable_api | bool | `true` | Enable the experimental ruler config api. | +| config.​ruler.​storage | object | `{}` | Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config | +| config.​runtime_config.​file | string | `"/etc/cortex-runtime-config/runtime_config.yaml"` | | +| config.​server.​grpc_listen_port | int | `9095` | | +| config.​server.​grpc_server_max_concurrent_streams | int | `10000` | | +| config.​server.​grpc_server_max_recv_msg_size | int | `10485760` | | +| config.​server.​grpc_server_max_send_msg_size | int | `10485760` | | +| config.​server.​http_listen_port | int | `8080` | | +| config.​storage | object | `{"engine":"blocks","index_queries_cache_config":{"memcached":{"expiration":"1h"},"memcached_client":{"timeout":"1s"}}}` | See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config | +| config.​storage.​index_queries_cache_config.​memcached.​expiration | string | `"1h"` | How long keys stay in the memcache | +| config.​storage.​index_queries_cache_config.​memcached_client.​timeout | string | `"1s"` | Maximum time to wait before giving up on memcached requests. 
| +| config.​store_gateway | object | `{"sharding_enabled":false}` | https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config | +| configs.​affinity | object | `{}` | | +| configs.​annotations | object | `{}` | | +| configs.​containerSecurityContext.​enabled | bool | `true` | | +| configs.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| configs.​enabled | bool | `false` | | +| configs.​env | list | `[]` | | +| configs.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| configs.​extraContainers | list | `[]` | | +| configs.​extraPorts | list | `[]` | | +| configs.​extraVolumeMounts | list | `[]` | | +| configs.​extraVolumes | list | `[]` | | +| configs.​initContainers | list | `[]` | | +| configs.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​nodeSelector | object | `{}` | | +| configs.​persistentVolume.​subPath | string | `nil` | | +| configs.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| configs.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| configs.​podLabels | object | `{}` | Pod Labels | +| configs.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​replicas | int | `1` | | +| configs.​resources | object | `{}` | | +| configs.​securityContext | object | `{}` | | +| configs.​service.​annotations | object | `{}` | | +| configs.​service.​labels | object | `{}` | | +| configs.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| configs.​serviceMonitor.​additionalLabels | object | `{}` | | +| configs.​serviceMonitor.​enabled | bool | `false` | | +| configs.​serviceMonitor.​extraEndpointSpec | object | `{}` | 
Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| configs.​serviceMonitor.​metricRelabelings | list | `[]` | | +| configs.​serviceMonitor.​relabelings | list | `[]` | | +| configs.​startupProbe.​failureThreshold | int | `10` | | +| configs.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| configs.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| configs.​strategy.​type | string | `"RollingUpdate"` | | +| configs.​terminationGracePeriodSeconds | int | `180` | | +| configs.​tolerations | list | `[]` | | +| configsdb_postgresql.​auth.​existing_secret.​key | string | `nil` | | +| configsdb_postgresql.​auth.​existing_secret.​name | string | `nil` | | +| configsdb_postgresql.​auth.​password | string | `nil` | | +| configsdb_postgresql.​enabled | bool | `false` | | +| configsdb_postgresql.​uri | string | `nil` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"distributor"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| distributor.​annotations | object | `{}` | | +| distributor.​autoscaling.​behavior 
| object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| distributor.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the distributor pods. | +| distributor.​autoscaling.​maxReplicas | int | `30` | | +| distributor.​autoscaling.​minReplicas | int | `2` | | +| distributor.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| distributor.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| distributor.​containerSecurityContext.​enabled | bool | `true` | | +| distributor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| distributor.​env | list | `[]` | | +| distributor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| distributor.​extraContainers | list | `[]` | | +| distributor.​extraPorts | list | `[]` | | +| distributor.​extraVolumeMounts | list | `[]` | | +| distributor.​extraVolumes | list | `[]` | | +| distributor.​initContainers | list | `[]` | | +| distributor.​lifecycle | object | `{}` | | +| distributor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​nodeSelector | object | `{}` | | +| distributor.​persistentVolume.​subPath | string | `nil` | | +| distributor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| distributor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| distributor.​podLabels | object | `{}` | Pod Labels | +| distributor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​replicas | int | `2` | | +| distributor.​resources | object | `{}` | | +| distributor.​securityContext | object | `{}` | | +| distributor.​service.​annotations | object | `{}` | | +| 
distributor.​service.​labels | object | `{}` | | +| distributor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| distributor.​serviceMonitor.​additionalLabels | object | `{}` | | +| distributor.​serviceMonitor.​enabled | bool | `false` | | +| distributor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| distributor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| distributor.​serviceMonitor.​relabelings | list | `[]` | | +| distributor.​startupProbe.​failureThreshold | int | `10` | | +| distributor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| distributor.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| distributor.​strategy.​type | string | `"RollingUpdate"` | | +| distributor.​terminationGracePeriodSeconds | int | `60` | | +| distributor.​tolerations | list | `[]` | | +| externalConfigSecretName | string | `"secret-with-config.yaml"` | | +| externalConfigVersion | string | `"0"` | | +| image.​pullPolicy | string | `"IfNotPresent"` | | +| image.​pullSecrets | list | `[]` | Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| image.​repository | string | `"quay.io/cortexproject/cortex"` | | +| image.​tag | string | `""` | Allows you to override the cortex version in this chart. Use at your own risk. 
| +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"ingester"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| ingester.​annotations | object | `{}` | | +| ingester.​autoscaling.​behavior.​scaleDown.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details | +| ingester.​autoscaling.​behavior.​scaleDown.​stabilizationWindowSeconds | int | `3600` | uses metrics from the past 1h to make scaleDown decisions | +| ingester.​autoscaling.​behavior.​scaleUp.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | This default scaleup policy allows adding 1 pod every 30 minutes. 
Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| ingester.​autoscaling.​enabled | bool | `false` | | +| ingester.​autoscaling.​maxReplicas | int | `30` | | +| ingester.​autoscaling.​minReplicas | int | `3` | | +| ingester.​autoscaling.​targetMemoryUtilizationPercentage | int | `80` | | +| ingester.​containerSecurityContext.​enabled | bool | `true` | | +| ingester.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ingester.​env | list | `[]` | | +| ingester.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| ingester.​extraContainers | list | `[]` | | +| ingester.​extraPorts | list | `[]` | | +| ingester.​extraVolumeMounts | list | `[]` | | +| ingester.​extraVolumes | list | `[]` | | +| ingester.​initContainers | list | `[]` | | +| ingester.​lifecycle.​preStop | object | `{"httpGet":{"path":"/ingester/shutdown","port":"http-metrics"}}` | The /shutdown preStop hook is recommended as part of the ingester scaledown process, but can be removed to optimize rolling restarts in instances that will never be scaled down or when using chunks storage with WAL disabled. https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down | +| ingester.​livenessProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. 
Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​nodeSelector | object | `{}` | | +| ingester.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Ingester data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| ingester.​persistentVolume.​annotations | object | `{}` | Ingester data Persistent Volume Claim annotations | +| ingester.​persistentVolume.​enabled | bool | `true` | If true and ingester.statefulSet.enabled is true, Ingester will create/use a Persistent Volume Claim If false, use emptyDir | +| ingester.​persistentVolume.​size | string | `"2Gi"` | Ingester data Persistent Volume size | +| ingester.​persistentVolume.​storageClass | string | `nil` | Ingester data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| ingester.​persistentVolume.​subPath | string | `""` | Subdirectory of Ingester data Persistent Volume to mount Useful if the volume's root directory is not empty | +| ingester.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ingester.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ingester.​podLabels | object | `{}` | Pod Labels | +| ingester.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ingester.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ingester.​replicas | int | `3` | | +| ingester.​resources | object | `{}` | | +| ingester.​securityContext | object | `{}` | | +| ingester.​service.​annotations | object | `{}` | | +| ingester.​service.​labels | object | `{}` | | +| ingester.​serviceAccount.​name | string | `nil` | | +| ingester.​serviceMonitor.​additionalLabels | object | `{}` | | +| ingester.​serviceMonitor.​enabled | bool | `false` | | +| ingester.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ingester.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ingester.​serviceMonitor.​relabelings | list | `[]` | | +| ingester.​startupProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. 
This is useful when using WAL | +| ingester.​statefulSet.​podManagementPolicy | string | `"OrderedReady"` | ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details | +| ingester.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| ingester.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ingester.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ingester.​strategy.​type | string | `"RollingUpdate"` | | +| ingester.​terminationGracePeriodSeconds | int | `240` | | +| ingester.​tolerations | list | `[]` | | +| ingress.​annotations | object | `{}` | | +| ingress.​enabled | bool | `false` | | +| ingress.​hosts[0].​host | string | `"chart-example.local"` | | +| ingress.​hosts[0].​paths[0] | string | `"/"` | | +| ingress.​ingressClass.​enabled | bool | `false` | | +| ingress.​ingressClass.​name | string | `"nginx"` | | +| ingress.​tls | list | `[]` | | +| memcached | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | chunk caching for legacy chunk storage engine | +| memcached-blocks-index.​architecture | string | `"high-availability"` | | +| memcached-blocks-index.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-index.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-index.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is 
the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached-blocks-index.​metrics.​enabled | bool | `true` | | +| memcached-blocks-index.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-index.​replicaCount | int | `2` | | +| memcached-blocks-index.​resources | object | `{}` | | +| memcached-blocks-metadata.​architecture | string | `"high-availability"` | | +| memcached-blocks-metadata.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-metadata.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-metadata.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks-metadata.​metrics.​enabled | bool | `true` | | +| memcached-blocks-metadata.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-metadata.​replicaCount | int | `2` | | +| memcached-blocks-metadata.​resources | object | `{}` | | +| memcached-blocks.​architecture | string | `"high-availability"` | | +| memcached-blocks.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks.​metrics.​enabled | bool | `true` | | +| memcached-blocks.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks.​replicaCount | int | `2` | | +| memcached-blocks.​resources | object | `{}` | | +| memcached-frontend.​architecture | string | `"high-availability"` | | +| memcached-frontend.​enabled | bool | `false` | | +| memcached-frontend.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-frontend.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-frontend.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-frontend.​metrics.​enabled | bool | `true` | | +| memcached-frontend.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-frontend.​replicaCount | int | `2` | | +| memcached-frontend.​resources | object | `{}` | | +| memcached-index-read | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index read caching for legacy chunk storage engine | +| memcached-index-read.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-read.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-read.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-index-write | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index write caching for legacy chunk storage engine | +| memcached-index-write.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-write.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-write.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| nginx.​affinity | object | `{}` | | +| nginx.​annotations | object | `{}` | | +| nginx.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| nginx.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the nginx pods. | +| nginx.​autoscaling.​maxReplicas | int | `30` | | +| nginx.​autoscaling.​minReplicas | int | `2` | | +| nginx.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| nginx.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| nginx.​config.​auth_orgs | list | `[]` | (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config | +| nginx.​config.​basicAuthSecretName | string | `""` | (optional) Name of basic auth secret. In order to use this option, a secret with htpasswd formatted contents at the key ".htpasswd" must exist. For example: apiVersion: v1 kind: Secret metadata: name: my-secret namespace: stringData: .htpasswd: | user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ Please note that the use of basic auth will not identify organizations the way X-Scope-OrgID does. Thus, the use of basic auth alone will not prevent one tenant from viewing the metrics of another. To ensure tenants are scoped appropriately, explicitly set the `X-Scope-OrgID` header in the nginx config. 
Example setHeaders: X-Scope-OrgID: $remote_user | +| nginx.​config.​client_max_body_size | string | `"1M"` | ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size | +| nginx.​config.​dnsResolver | string | `"coredns.kube-system.svc.cluster.local"` | | +| nginx.​config.​httpSnippet | string | `""` | arbitrary snippet to inject in the http { } section of the nginx config | +| nginx.​config.​mainSnippet | string | `""` | arbitrary snippet to inject in the top section of the nginx config | +| nginx.​config.​serverSnippet | string | `""` | arbitrary snippet to inject in the server { } section of the nginx config | +| nginx.​config.​setHeaders | object | `{}` | | +| nginx.​containerSecurityContext.​enabled | bool | `true` | | +| nginx.​containerSecurityContext.​readOnlyRootFilesystem | bool | `false` | | +| nginx.​enabled | bool | `true` | | +| nginx.​env | list | `[]` | | +| nginx.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| nginx.​extraContainers | list | `[]` | | +| nginx.​extraPorts | list | `[]` | | +| nginx.​extraVolumeMounts | list | `[]` | | +| nginx.​extraVolumes | list | `[]` | | +| nginx.​http_listen_port | int | `80` | | +| nginx.​image.​pullPolicy | string | `"IfNotPresent"` | | +| nginx.​image.​repository | string | `"nginx"` | | +| nginx.​image.​tag | float | `1.21` | | +| nginx.​initContainers | list | `[]` | | +| nginx.​livenessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​nodeSelector | object | `{}` | | +| nginx.​persistentVolume.​subPath | string | `nil` | | +| nginx.​podAnnotations | object | `{}` | Pod Annotations | +| nginx.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| nginx.​podLabels | object | `{}` | Pod Labels | +| nginx.​readinessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| 
nginx.​replicas | int | `2` | | +| nginx.​resources | object | `{}` | | +| nginx.​securityContext | object | `{}` | | +| nginx.​service.​annotations | object | `{}` | | +| nginx.​service.​labels | object | `{}` | | +| nginx.​service.​type | string | `"ClusterIP"` | | +| nginx.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| nginx.​startupProbe.​failureThreshold | int | `10` | | +| nginx.​startupProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| nginx.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| nginx.​strategy.​type | string | `"RollingUpdate"` | | +| nginx.​terminationGracePeriodSeconds | int | `10` | | +| nginx.​tolerations | list | `[]` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"querier"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| querier.​annotations | object | `{}` | | +| querier.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| querier.​autoscaling.​enabled | bool | `false` | Creates 
a HorizontalPodAutoscaler for the querier pods. | +| querier.​autoscaling.​maxReplicas | int | `30` | | +| querier.​autoscaling.​minReplicas | int | `2` | | +| querier.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| querier.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| querier.​containerSecurityContext.​enabled | bool | `true` | | +| querier.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| querier.​env | list | `[]` | | +| querier.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| querier.​extraContainers | list | `[]` | | +| querier.​extraPorts | list | `[]` | | +| querier.​extraVolumeMounts | list | `[]` | | +| querier.​extraVolumes | list | `[]` | | +| querier.​initContainers | list | `[]` | | +| querier.​lifecycle | object | `{}` | | +| querier.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​nodeSelector | object | `{}` | | +| querier.​persistentVolume.​subPath | string | `nil` | | +| querier.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| querier.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| querier.​podLabels | object | `{}` | Pod Labels | +| querier.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​replicas | int | `2` | | +| querier.​resources | object | `{}` | | +| querier.​securityContext | object | `{}` | | +| querier.​service.​annotations | object | `{}` | | +| querier.​service.​labels | object | `{}` | | +| querier.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| querier.​serviceMonitor.​additionalLabels | object | `{}` | | +| querier.​serviceMonitor.​enabled | bool | `false` | | +| 
querier.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| querier.​serviceMonitor.​metricRelabelings | list | `[]` | | +| querier.​serviceMonitor.​relabelings | list | `[]` | | +| querier.​startupProbe.​failureThreshold | int | `10` | | +| querier.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| querier.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| querier.​strategy.​type | string | `"RollingUpdate"` | | +| querier.​terminationGracePeriodSeconds | int | `180` | | +| querier.​tolerations | list | `[]` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"query-frontend"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| query_frontend.​annotations | object | `{}` | | +| query_frontend.​containerSecurityContext.​enabled | bool | `true` | | +| query_frontend.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| query_frontend.​env | list | `[]` | | +| query_frontend.​extraArgs | object | `{}` | Additional Cortex 
container arguments, e.g. log.level (debug, info, warn, error) | +| query_frontend.​extraContainers | list | `[]` | | +| query_frontend.​extraPorts | list | `[]` | | +| query_frontend.​extraVolumeMounts | list | `[]` | | +| query_frontend.​extraVolumes | list | `[]` | | +| query_frontend.​initContainers | list | `[]` | | +| query_frontend.​lifecycle | object | `{}` | | +| query_frontend.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​nodeSelector | object | `{}` | | +| query_frontend.​persistentVolume.​subPath | string | `nil` | | +| query_frontend.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| query_frontend.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| query_frontend.​podLabels | object | `{}` | Pod Labels | +| query_frontend.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​replicas | int | `2` | | +| query_frontend.​resources | object | `{}` | | +| query_frontend.​securityContext | object | `{}` | | +| query_frontend.​service.​annotations | object | `{}` | | +| query_frontend.​service.​labels | object | `{}` | | +| query_frontend.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| query_frontend.​serviceMonitor.​additionalLabels | object | `{}` | | +| query_frontend.​serviceMonitor.​enabled | bool | `false` | | +| query_frontend.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| query_frontend.​serviceMonitor.​metricRelabelings | list | `[]` | | +| query_frontend.​serviceMonitor.​relabelings | list | `[]` | | +| 
query_frontend.​startupProbe.​failureThreshold | int | `10` | | +| query_frontend.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| query_frontend.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| query_frontend.​strategy.​type | string | `"RollingUpdate"` | | +| query_frontend.​terminationGracePeriodSeconds | int | `180` | | +| query_frontend.​tolerations | list | `[]` | | +| ruler.​affinity | object | `{}` | | +| ruler.​annotations | object | `{}` | | +| ruler.​containerSecurityContext.​enabled | bool | `true` | | +| ruler.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ruler.​directories | object | `{}` | allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html | +| ruler.​enabled | bool | `true` | | +| ruler.​env | list | `[]` | | +| ruler.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) | +| ruler.​extraContainers | list | `[]` | | +| ruler.​extraPorts | list | `[]` | | +| ruler.​extraVolumeMounts | list | `[]` | | +| ruler.​extraVolumes | list | `[]` | | +| ruler.​initContainers | list | `[]` | | +| ruler.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​nodeSelector | object | `{}` | | +| ruler.​persistentVolume.​subPath | string | `nil` | | +| ruler.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ruler.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ruler.​podLabels | object | `{}` | Pod Labels | +| ruler.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​replicas | int | `1` | | +| ruler.​resources | object | `{}` | | +| ruler.​securityContext | object | `{}` | | +| ruler.​service.​annotations | object | `{}` | | +| ruler.​service.​labels | object | `{}` | | +| ruler.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| ruler.​serviceMonitor.​additionalLabels | object | `{}` | | +| ruler.​serviceMonitor.​enabled | bool | `false` | | +| ruler.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ruler.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ruler.​serviceMonitor.​relabelings | list | `[]` | | +| ruler.​sidecar | object | 
`{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/tmp/rules","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_rules","labelValue":null,"resources":{},"searchNamespace":null,"watchMethod":null}` | Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders | +| ruler.​sidecar.​defaultFolderName | string | `nil` | The default folder name, it will create a subfolder under the `folder` and put rules in there instead | +| ruler.​sidecar.​folder | string | `"/tmp/rules"` | folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) | +| ruler.​sidecar.​folderAnnotation | string | `nil` | If specified, the sidecar will look for annotation with this name to create folder and put graph here. You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. | +| ruler.​sidecar.​label | string | `"cortex_rules"` | label that the configmaps with rules are marked with | +| ruler.​sidecar.​labelValue | string | `nil` | value of label that the configmaps with rules are set to | +| ruler.​sidecar.​searchNamespace | string | `nil` | If specified, the sidecar will search for rules config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | +| ruler.​startupProbe.​failureThreshold | int | `10` | | +| ruler.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ruler.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ruler.​strategy.​type | string | `"RollingUpdate"` | | +| ruler.​terminationGracePeriodSeconds | int | `180` | | +| ruler.​tolerations | list | `[]` | | +| runtimeconfigmap.​annotations | object | `{}` | | +| runtimeconfigmap.​create | bool | `true` | If true, a configmap for the `runtime_config` will be created. If false, the configmap _must_ exist already on the cluster or pods will fail to create. | +| runtimeconfigmap.​runtime_config | object | `{}` | https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file | +| serviceAccount.​annotations | object | `{}` | | +| serviceAccount.​automountServiceAccountToken | bool | `true` | | +| serviceAccount.​create | bool | `true` | | +| serviceAccount.​name | string | `nil` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"store-gateway"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` 
| | +| store_gateway.​annotations | object | `{}` | | +| store_gateway.​containerSecurityContext.​enabled | bool | `true` | | +| store_gateway.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| store_gateway.​env | list | `[]` | | +| store_gateway.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| store_gateway.​extraContainers | list | `[]` | | +| store_gateway.​extraPorts | list | `[]` | | +| store_gateway.​extraVolumeMounts | list | `[]` | | +| store_gateway.​extraVolumes | list | `[]` | | +| store_gateway.​initContainers | list | `[]` | | +| store_gateway.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​nodeSelector | object | `{}` | | +| store_gateway.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Store-gateway data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| store_gateway.​persistentVolume.​annotations | object | `{}` | Store-gateway data Persistent Volume Claim annotations | +| store_gateway.​persistentVolume.​enabled | bool | `true` | If true Store-gateway will create/use a Persistent Volume Claim If false, use emptyDir | +| store_gateway.​persistentVolume.​size | string | `"2Gi"` | Store-gateway data Persistent Volume size | +| store_gateway.​persistentVolume.​storageClass | string | `nil` | Store-gateway data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| store_gateway.​persistentVolume.​subPath | string | `""` | Subdirectory of Store-gateway data Persistent Volume to mount Useful if the volume's root directory is not empty | +| store_gateway.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| store_gateway.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| store_gateway.​podLabels | object | `{}` | Pod Labels | +| store_gateway.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​replicas | int | `1` | | +| store_gateway.​resources | object | `{}` | | +| store_gateway.​securityContext | object | `{}` | | +| store_gateway.​service.​annotations | object | `{}` | | +| store_gateway.​service.​labels | object | `{}` | | +| store_gateway.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| store_gateway.​serviceMonitor.​additionalLabels | object | `{}` | | +| store_gateway.​serviceMonitor.​enabled | bool | `false` | | +| store_gateway.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| store_gateway.​serviceMonitor.​metricRelabelings | list | `[]` | | +| store_gateway.​serviceMonitor.​relabelings | list | `[]` | | +| store_gateway.​startupProbe.​failureThreshold | int | `60` | | +| store_gateway.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​startupProbe.​initialDelaySeconds | int | `120` | | +| store_gateway.​startupProbe.​periodSeconds | int | `30` | | +| store_gateway.​strategy.​type | string | `"RollingUpdate"` | | +| 
store_gateway.​terminationGracePeriodSeconds | int | `240` | | +| store_gateway.​tolerations | list | `[]` | | +| table_manager.​affinity | object | `{}` | | +| table_manager.​annotations | object | `{}` | | +| table_manager.​containerSecurityContext.​enabled | bool | `true` | | +| table_manager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| table_manager.​env | list | `[]` | | +| table_manager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| table_manager.​extraContainers | list | `[]` | | +| table_manager.​extraPorts | list | `[]` | | +| table_manager.​extraVolumeMounts | list | `[]` | | +| table_manager.​extraVolumes | list | `[]` | | +| table_manager.​initContainers | list | `[]` | | +| table_manager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​nodeSelector | object | `{}` | | +| table_manager.​persistentVolume.​subPath | string | `nil` | | +| table_manager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| table_manager.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| table_manager.​podLabels | object | `{}` | Pod Labels | +| table_manager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​replicas | int | `1` | | +| table_manager.​resources | object | `{}` | | +| table_manager.​securityContext | object | `{}` | | +| table_manager.​service.​annotations | object | `{}` | | +| table_manager.​service.​labels | object | `{}` | | +| table_manager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| table_manager.​serviceMonitor.​additionalLabels | object | `{}` | | +| table_manager.​serviceMonitor.​enabled | bool 
| `false` | | +| table_manager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| table_manager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| table_manager.​serviceMonitor.​relabelings | list | `[]` | | +| table_manager.​startupProbe.​failureThreshold | int | `10` | | +| table_manager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| table_manager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| table_manager.​strategy.​type | string | `"RollingUpdate"` | | +| table_manager.​terminationGracePeriodSeconds | int | `180` | | +| table_manager.​tolerations | list | `[]` | | +| tags.​blocks-storage-memcached | bool | `false` | Set to true to enable block storage memcached caching | +| useConfigMap | bool | `false` | | +| useExternalConfig | bool | `false` | | + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/NOTES.txt new file mode 100644 index 0000000..1bd3203 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{- if eq .Values.config.storage.engine "chunks" }} +Cortex chunks storage has been deprecated, and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. +No new features will be added to the chunks storage. +Unlike the official cortex default configuration this helm-chart does not run the chunk engine by default. +{{- end }} + +Verify the application is working by running these commands: + kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "cortex.querierFullname" . 
}} {{ .Values.config.server.http_listen_port }} + curl http://127.0.0.1:{{ .Values.config.server.http_listen_port }}/services diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/_helpers.tpl new file mode 100644 index 0000000..81914c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cortex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cortex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cortex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cortex.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cortex.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the app name of cortex clients. 
Defaults to the same logic as "cortex.fullname", and default client expects "prometheus". +*/}} +{{- define "client.name" -}} +{{- if .Values.client.name -}} +{{- .Values.client.name -}} +{{- else if .Values.client.fullnameOverride -}} +{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "prometheus" .Values.client.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "cortex.labels" -}} +helm.sh/chart: {{ include "cortex.chart" . }} +{{ include "cortex.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cortex.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cortex.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create configuration parameters for memcached configuration +*/}} +{{- define "cortex.memcached" -}} +{{- if and (eq .Values.config.storage.engine "blocks") (index .Values "tags" "blocks-storage-memcached") }} +- "-blocks-storage.bucket-store.index-cache.backend=memcached" +- "-blocks-storage.bucket-store.index-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-index.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.chunks-cache.backend=memcached" +- "-blocks-storage.bucket-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.metadata-cache.backend=memcached" +- "-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-metadata.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") .Values.memcached.enabled }} +- "-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-read" "enabled") }} +- "-store.index-cache-read.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-read.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-write" "enabled") }} +- "-store.index-cache-write.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-write.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Create configuration for frontend memcached configuration +*/}} +{{- define "cortex.frontend-memcached" -}} +{{- if index .Values "memcached-frontend" 
"enabled" }} +- "-frontend.memcached.addresses=dns+{{ template "cortex.fullname" . }}-memcached-frontend.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Determine the policy api version +*/}} +{{- define "cortex.pdbVersion" -}} +{{- if or (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") (semverCompare ">=1.21" .Capabilities.KubeVersion.Version) -}} +policy/v1 +{{- else -}} +policy/v1beta1 +{{- end -}} +{{- end -}} + +{{/* +Get checksum of config secret or configMap +*/}} +{{- define "cortex.configChecksum" -}} +{{- if .Values.useExternalConfig -}} +{{- .Values.externalConfigVersion -}} +{{- else if .Values.useConfigMap -}} +{{- include (print $.Template.BasePath "/configmap.yaml") . | sha256sum -}} +{{- else -}} +{{- include (print $.Template.BasePath "/secret.yaml") . | sha256sum -}} +{{- end -}} +{{- end -}} + +{{/* +Get volume of config secret of configMap +*/}} +{{- define "cortex.configVolume" -}} +- name: config + {{- if .Values.useExternalConfig }} + secret: + secretName: {{ .Values.externalConfigSecretName }} + {{- else if .Values.useConfigMap }} + configMap: + name: {{ template "cortex.fullname" . }}-config + {{- else }} + secret: + secretName: {{ template "cortex.fullname" . 
}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml new file mode 100644 index 0000000..49c4ca7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alertmanager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + name: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - name: alertmanager +# image: quay.io/cortexproject/cortex:v1.9.0 +# image: registry.cloud.intermax:5000/library/cortex:v1.11.0 + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cortex:v1.11.0 + imagePullPolicy: IfNotPresent + args: + - -target=alertmanager +# - -log.level=debug + - -server.http-listen-port=80 + - -alertmanager.configs.url=http://{{ template "cortex.fullname" . 
}}-configs:8080 + - -alertmanager.web.external-url=/alertmanager + ports: + - containerPort: 80 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 0000000..989feb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: alertmanager +spec: + ports: + - port: 80 + selector: + name: alertmanager diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrole.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrole.yaml new file mode 100644 index 0000000..cf7f25a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrole.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cortex.fullname" . }}-clusterrole + labels: + {{- include "cortex.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..c1d9884 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cortex.fullname" . }}-clusterrolebinding + labels: + {{- include "cortex.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cortex.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 0000000..f89b33c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,23 @@ + +{{/* +compactor fullname +*/}} +{{- define "cortex.compactorFullname" -}} +{{ include "cortex.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "cortex.compactorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "cortex.compactorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: compactor +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml new file mode 100644 index 0000000..8634e4c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.compactor.replicas) 1) (.Values.compactor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.compactor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml new file mode 100644 index 0000000..a33e849 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.compactor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} + {{- if .Values.compactor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.compactor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.compactor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.compactor.serviceMonitor.interval }} + interval: {{ .Values.compactor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.compactor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.compactor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.compactor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.compactor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 0000000..c0a1baf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,141 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.compactor.annotations | nindent 4 }} +spec: + replicas: {{ .Values.compactor.replicas }} + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.compactor.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-compactor + {{- if .Values.compactor.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.compactor.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.compactor.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.compactor.persistentVolume.storageClass }} + {{- if (eq "-" .Values.compactor.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.compactor.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.compactor.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.compactor.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.compactorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.compactor.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.compactor.priorityClassName }} + priorityClassName: {{ .Values.compactor.priorityClassName }} + {{- end }} + {{- if .Values.compactor.securityContext.enabled }} + securityContext: {{- omit .Values.compactor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.compactor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.compactor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.compactor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.compactor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.compactor.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.compactor.extraVolumes }} + {{- toYaml .Values.compactor.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.compactor.extraContainers }} + {{ toYaml .Values.compactor.extraContainers | nindent 8 }} + {{- end }} + - name: compactor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=compactor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.compactor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.compactor.extraVolumeMounts }} + {{- toYaml .Values.compactor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.compactor.persistentVolume.subPath }} + subPath: {{ .Values.compactor.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.compactor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.compactor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.compactor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.compactor.resources | nindent 12 }} + {{- if .Values.compactor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.compactor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.compactor.env }} + env: + {{- toYaml .Values.compactor.env | nindent 12 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml new file mode 100644 index 0000000..ae20f78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml @@ -0,0 +1,25 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.compactorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- with .Values.compactor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.compactor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.compactorSelectorLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configmap.yaml new file mode 100644 index 0000000..001b13a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (and (not .Values.useExternalConfig) (.Values.useConfigMap)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: | + {{- tpl (toYaml .Values.config) . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl new file mode 100644 index 0000000..c8945dc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl @@ -0,0 +1,23 @@ + +{{/* +configs fullname +*/}} +{{- define "cortex.configsFullname" -}} +{{ include "cortex.fullname" . }}-configs +{{- end }} + +{{/* +configs common labels +*/}} +{{- define "cortex.configsLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: configs +{{- end }} + +{{/* +configs selector labels +*/}} +{{- define "cortex.configsSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: configs +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml new file mode 100644 index 0000000..86048ce --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml @@ -0,0 +1,124 @@ +{{- if .Values.configs.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.configs.annotations | nindent 4 }} +spec: + replicas: {{ .Values.configs.replicas }} + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.configs.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.configsLabels" . | nindent 8 }} + {{- with .Values.configs.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.configs.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.configs.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.configs.priorityClassName }} + priorityClassName: {{ .Values.configs.priorityClassName }} + {{- end }} + {{- if .Values.configs.securityContext.enabled }} + securityContext: {{- omit .Values.configs.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.configs.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: configs + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=configs" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configsdb_postgresql.enabled }} + - "-configs.database.uri={{ .Values.configsdb_postgresql.uri }}" + - "-configs.database.password-file=/etc/postgresql/password" + - "-configs.database.migrations-dir=/migrations" + {{- else }} + - "-configs.database.uri=memory://" + {{- end }} + {{- range $key, $value := .Values.configs.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/cortex + subPath: {{ .Values.configs.persistentVolume.subPath }} + - name: runtime-config + mountPath: /etc/cortex-runtime-config + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + mountPath: /etc/postgresql + {{- end }} + {{- if .Values.configs.extraVolumeMounts }} + {{- toYaml .Values.configs.extraVolumeMounts | nindent 12}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.configs.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.configs.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.configs.readinessProbe | nindent 12 }} + 
resources: + {{- toYaml .Values.configs.resources | nindent 12 }} + {{- if .Values.configs.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.configs.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.configs.env }} + env: + {{- toYaml .Values.configs.env | nindent 12 }} + {{- end }} + {{- if .Values.configs.extraContainers }} + {{- toYaml .Values.configs.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.configs.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.configs.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.configs.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + secret: + secretName: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.name }}{{ else }}{{ template "cortex.fullname" . }}-postgresql{{ end }} + items: + - key: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.key }}{{ else }}postgresql-password{{ end }} + path: password + {{- end }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.configs.extraVolumes }} + {{- toYaml .Values.configs.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml new file mode 100644 index 0000000..b6e46b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.configs.replicas) 1) (.Values.configs.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.configs.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml new file mode 100644 index 0000000..393bc32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.configs.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . 
| nindent 4 }} + {{- if .Values.configs.serviceMonitor.additionalLabels }} +{{ toYaml .Values.configs.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.configs.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.configs.serviceMonitor.interval }} + interval: {{ .Values.configs.serviceMonitor.interval }} + {{- end }} + {{- if .Values.configs.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.configs.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.configs.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.configs.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.configs.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.configs.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml new file mode 100644 index 0000000..6dbc2cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.configs.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + {{- with .Values.configs.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.configs.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.configsSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/cortex-pv.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/cortex-pv.yaml new file mode 100644 index 0000000..472f83e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/cortex-pv.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-0 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH1 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-1 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH2 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-2 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ 
.Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH3 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 0000000..24e8d00 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,23 @@ + +{{/* +distributor fullname +*/}} +{{- define "cortex.distributorFullname" -}} +{{ include "cortex.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "cortex.distributorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "cortex.distributorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml new file mode 100644 index 0000000..fc9c0ba --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.distributor.annotations | nindent 4 }} +spec: + {{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.distributor.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.distributorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.distributor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.distributor.priorityClassName }} + priorityClassName: {{ .Values.distributor.priorityClassName }} + {{- end }} + {{- if .Values.distributor.securityContext.enabled }} + securityContext: {{- omit .Values.distributor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.distributor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: distributor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=distributor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.distributor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.distributor.extraVolumeMounts }} + {{- toYaml .Values.distributor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.distributor.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.distributor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.distributor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.distributor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.distributor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.distributor.env }} + env: + {{- toYaml .Values.distributor.env | nindent 12 }} + {{- end }} + {{- with .Values.distributor.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.distributor.extraContainers }} + {{- toYaml .Values.distributor.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.distributor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.distributor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.distributor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.distributor.extraVolumes }} + {{- toYaml .Values.distributor.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml new file mode 100644 index 0000000..0c1c9f6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.distributor.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.distributorFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.distributorFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml new file mode 100644 index 0000000..7b05701 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.distributor.replicas) 1) (.Values.distributor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.distributor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml new file mode 100644 index 0000000..5db8389 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.distributor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.distributorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- if .Values.distributor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.distributor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.distributor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.distributor.serviceMonitor.interval }} + interval: {{ .Values.distributor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.distributor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.distributor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.distributor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.distributor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 0000000..1c4f7f6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . 
}}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml new file mode 100644 index 0000000..2db7197 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.distributorSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 0000000..4705327 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,23 @@ + +{{/* +ingester fullname +*/}} +{{- define "cortex.ingesterFullname" -}} +{{ include "cortex.fullname" . }}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "cortex.ingesterLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "cortex.ingesterSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ingester +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml new file mode 100644 index 0000000..b26d3a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml @@ -0,0 +1,130 @@ +{{- if not .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.ingester.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.ingester.env }} + {{ toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- with .Values.ingester.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml new file mode 100644 index 0000000..97c5290 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml @@ -0,0 +1,29 @@ +{{- with .Values.ingester.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.ingesterFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ if $.Values.ingester.statefulSet.enabled }}StatefulSet{{ else }}Deployment{{ end }} + name: {{ include "cortex.ingesterFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .targetMemoryUtilizationPercentage }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml new file mode 100644 index 0000000..a47ecb4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ingester.replicas) 1) (.Values.ingester.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ingester.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml new file mode 100644 index 0000000..310ca54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingester.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . 
| nindent 4 }} + {{- if .Values.ingester.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ingester.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ingester.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ingester.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 0000000..8016441 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,153 @@ +{{- if .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.ingester.statefulStrategy | nindent 4 }} + podManagementPolicy: "{{ .Values.ingester.statefulSet.podManagementPolicy }}" + serviceName: {{ template "cortex.fullname" . }}-ingester-headless + {{- if .Values.ingester.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.ingester.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.ingester.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.ingester.persistentVolume.storageClass }} + {{- if (eq "-" .Values.ingester.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.ingester.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.ingester.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.ingester.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.ingester.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8 }} + {{- end }} + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ingester.env }} + env: + {{- toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 0000000..b783caa --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml new file mode 100644 index 0000000..02183ae --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl new file mode 100644 index 0000000..61d8b78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl @@ -0,0 +1,23 @@ + +{{/* +nginx fullname +*/}} +{{- define "cortex.nginxFullname" -}} +{{ include "cortex.fullname" . }}-nginx +{{- end }} + +{{/* +nginx common labels +*/}} +{{- define "cortex.nginxLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: nginx +{{- end }} + +{{/* +nginx selector labels +*/}} +{{- define "cortex.nginxSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: nginx +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml new file mode 100644 index 0000000..fd3474d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml @@ -0,0 +1,140 @@ +{{- if .Values.nginx.enabled }} +{{- $rootDomain := printf "%s.svc.%s:%d" .Release.Namespace .Values.clusterDomain (.Values.config.server.http_listen_port | int) }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . 
| nindent 4 }} +data: + nginx.conf: |- + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + {{- with .Values.nginx.config.mainSnippet }} + {{ tpl . $ | nindent 4 }} + {{- end }} + + http { + default_type application/octet-stream; + client_max_body_size {{.Values.nginx.config.client_max_body_size}}; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" $http_x_scope_orgid'; + access_log /dev/stderr main; + sendfile on; + tcp_nopush on; + resolver {{ default (printf "coredns.kube-system.svc.%s" .Values.clusterDomain ) .Values.nginx.config.dnsResolver }}; + + {{- with .Values.nginx.config.httpSnippet }} + {{ tpl . $ | nindent 6 }} + {{- end }} + + server { # simple reverse-proxy + listen {{ .Values.nginx.http_listen_port }}; + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + proxy_http_version 1.1; + proxy_set_header X-Scope-OrgID 0; + + {{- range $key, $value := .Values.nginx.config.setHeaders }} + proxy_set_header {{ $key }} {{ $value }}; + {{- end }} + + {{ if .Values.nginx.config.basicAuthSecretName -}} + auth_basic "Restricted Content"; + auth_basic_user_file /etc/apache2/.htpasswd; + {{- end }} + + {{- with .Values.nginx.config.serverSnippet }} + {{ tpl . $ | nindent 8 }} + {{- end }} + + location = /healthz { + # auth_basic off is not set here, even when a basic auth directive is + # included in the server block, as Nginx's NGX_HTTP_REWRITE_PHASE + # (point when this return statement is evaluated) comes before the + # NGX_HTTP_ACCESS_PHASE (point when basic auth is evaluated). Thus, + # this return statement returns a response before basic auth is + # evaluated. + return 200 'alive'; + } + + # Distributor Config + location = /ring { + proxy_pass http://{{ template "cortex.fullname" . 
}}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /all_user_stats { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /api/prom/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + ## New Remote write API. Ref: https://cortexmetrics.io/docs/api/#remote-write + location = /api/v1/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + # Alertmanager Config + location ~ /api/prom/alertmanager/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /api/v1/alerts { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /multitenant_alertmanager/status { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + # Ruler Config + location ~ /api/v1/rules { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + location ~ /ruler/ring { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + # Config Config + location ~ /api/prom/configs/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-configs.{{ $rootDomain }}$request_uri; + } + + # Query Config + location ~ /api/prom/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + ## New Query frontend APIs as per https://cortexmetrics.io/docs/api/#querier--query-frontend + location ~ ^{{.Values.config.api.prometheus_http_prefix}}/api/v1/(read|metadata|labels|series|query_range|query) { + proxy_pass http://{{ template "cortex.fullname" . 
}}-query-frontend.{{ $rootDomain }}$request_uri; + } + + location ~ {{.Values.config.api.prometheus_http_prefix}}/api/v1/label/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + {{- if and (.Values.config.auth_enabled) (.Values.nginx.config.auth_orgs) }} + # Auth orgs + {{- range $org := compact .Values.nginx.config.auth_orgs | uniq }} + location = /api/v1/push/{{ $org }} { + proxy_set_header X-Scope-OrgID {{ $org }}; + proxy_pass http://{{ template "cortex.fullname" $ }}-distributor.{{ $rootDomain }}/api/v1/push; + } + {{- end }} + {{- end }} + } + } +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml new file mode 100644 index 0000000..bbd3a9d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml @@ -0,0 +1,111 @@ +{{- if .Values.nginx.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.nginx.annotations | nindent 4 }} +spec: + {{- if not .Values.nginx.autoscaling.enabled }} + replicas: {{ .Values.nginx.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.nginx.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.nginxLabels" . | nindent 8 }} + {{- with .Values.nginx.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/nginx/nginx-config.yaml") . | sha256sum }} + {{- with .Values.nginx.podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.nginx.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName }} + {{- end }} + {{- if .Values.nginx.securityContext.enabled }} + securityContext: {{- omit .Values.nginx.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.nginx.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + {{- if .Values.nginx.extraArgs }} + args: + {{- range $key, $value := .Values.nginx.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.nginx.extraVolumeMounts }} + {{- toYaml .Values.nginx.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + mountPath: /etc/apache2 + readOnly: true + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.nginx.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.nginx.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.nginx.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.nginx.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.nginx.resources | nindent 12 }} + {{- if .Values.nginx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.nginx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.nginx.env }} + env: + {{- toYaml .Values.nginx.env | nindent 12 }} + {{- end }} + {{- if .Values.nginx.extraContainers }} + {{ toYaml 
.Values.nginx.extraContainers | indent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.nginx.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.nginx.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.nginx.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.nginx.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "cortex.fullname" . }}-nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + secret: + defaultMode: 420 + secretName: {{ .Values.nginx.config.basicAuthSecretName }} + {{- end }} + {{- if .Values.nginx.extraVolumes }} + {{- toYaml .Values.nginx.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml new file mode 100644 index 0000000..b93a13d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.nginx.enabled .Values.nginx.autoscaling.enabled }} +{{- with .Values.nginx.autoscaling -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.nginxFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.nginxFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml new file mode 100644 index 0000000..51e6609 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.ingress.enabled .Values.nginx.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} +spec: +{{- if .Values.ingress.ingressClass.enabled }} + ingressClassName: {{ .Values.ingress.ingressClass.name }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + pathType: "Prefix" + backend: + service: + name: {{ include "cortex.nginxFullname" $ }} + port: + number: {{ $.Values.nginx.http_listen_port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml new file mode 100644 index 0000000..959764a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.nginx.enabled) (gt (int .Values.nginx.replicas) 1) (.Values.nginx.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.nginx.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml new file mode 100644 index 0000000..72a2c44 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + {{- with .Values.nginx.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.nginx.service.annotations | nindent 4 }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.nginxSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/node-exporter.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/node-exporter.yaml new file mode 100644 index 0000000..7bb3983 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/node-exporter.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: node-exporter + name: node-exporter + name: node-exporter + namespace: imxc +spec: + clusterIP: None + ports: + - name: scrape + port: 9100 + protocol: TCP + selector: + app: node-exporter + type: ClusterIP +--- +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: node-exporter + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: node-exporter +{{- end }} + template: + metadata: + labels: + app: node-exporter + name: node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/node-exporter + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + ports: + - containerPort: 9100 + hostPort: 9100 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - 
--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl new file mode 100644 index 0000000..c0a6204 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl @@ -0,0 +1,23 @@ + +{{/* +querier fullname +*/}} +{{- define "cortex.querierFullname" -}} +{{ include "cortex.fullname" . }}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "cortex.querierLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "cortex.querierSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: querier +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml new file mode 100644 index 0000000..a84ba8a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.querier.annotations | nindent 4 }} +spec: + {{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.querier.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.querierLabels" . | nindent 8 }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.querier.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.querier.priorityClassName }} + priorityClassName: {{ .Values.querier.priorityClassName }} + {{- end }} + {{- if .Values.querier.securityContext.enabled }} + securityContext: {{- omit .Values.querier.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.querier.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: querier + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=querier" + - "-config.file=/etc/cortex/cortex.yaml" + - "-querier.frontend-address={{ template "cortex.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.grpc_listen_port }}" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.querier.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.querier.extraVolumeMounts }} + {{- toYaml .Values.querier.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.querier.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.querier.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.querier.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.querier.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.querier.resources | nindent 12 }} + {{- if .Values.querier.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.querier.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.querier.env }} + {{- toYaml .Values.querier.env | nindent 12 }} + {{- end }} + {{- with .Values.querier.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.querier.extraContainers }} + {{- toYaml .Values.querier.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.querier.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.querier.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.querier.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.querier.extraVolumes }} + {{- toYaml .Values.querier.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml new file mode 100644 index 0000000..f078526 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.querier.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.querierFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.querierLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.querierFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml new file mode 100644 index 0000000..b69de62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.querier.replicas) 1) (.Values.querier.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.querier.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml new file mode 100644 index 0000000..c84d1a4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.querier.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . 
| nindent 4 }} + {{- if .Values.querier.serviceMonitor.additionalLabels }} +{{ toYaml .Values.querier.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.querier.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.querier.serviceMonitor.interval }} + interval: {{ .Values.querier.serviceMonitor.interval }} + {{- end }} + {{- if .Values.querier.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.querier.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.querier.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.querier.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.querier.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.querier.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml new file mode 100644 index 0000000..0701b7d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- with .Values.querier.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.querier.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.querierSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 0000000..c1f74c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,23 @@ + +{{/* +query-frontend fullname +*/}} +{{- define "cortex.queryFrontendFullname" -}} +{{ include "cortex.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "cortex.queryFrontendLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "cortex.queryFrontendSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 0000000..3e31d18 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,107 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . 
| nindent 4 }} + annotations: + {{- toYaml .Values.query_frontend.annotations | nindent 4 }} +spec: + replicas: {{ .Values.query_frontend.replicas }} + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.query_frontend.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 8 }} + {{- with .Values.query_frontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.query_frontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.query_frontend.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.query_frontend.priorityClassName }} + priorityClassName: {{ .Values.query_frontend.priorityClassName }} + {{- end }} + {{- if .Values.query_frontend.securityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.query_frontend.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: query-frontend + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=query-frontend" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.frontend-memcached" . 
| nindent 12 }} + {{- range $key, $value := .Values.query_frontend.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.query_frontend.extraVolumeMounts }} + {{- toYaml .Values.query_frontend.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.query_frontend.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.query_frontend.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.query_frontend.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.query_frontend.resources | nindent 12 }} + {{- if .Values.query_frontend.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.env }} + env: + {{- toYaml .Values.query_frontend.env | nindent 12 }} + {{- end }} + {{- with .Values.query_frontend.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.extraContainers }} + {{- toYaml .Values.query_frontend.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.query_frontend.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.query_frontend.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.query_frontend.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.query_frontend.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.query_frontend.extraVolumes }} + {{- toYaml .Values.query_frontend.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml new file mode 100644 index 0000000..2d76c6b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.query_frontend.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- if .Values.query_frontend.serviceMonitor.additionalLabels }} +{{ toYaml .Values.query_frontend.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.query_frontend.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.query_frontend.serviceMonitor.interval }} + interval: {{ .Values.query_frontend.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query_frontend.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.query_frontend.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml new file mode 100644 index 0000000..939457c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 0000000..85ff2e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml new file mode 100644 index 0000000..5256949 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.query_frontend.replicas) 1) (.Values.query_frontend.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.query_frontend.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 0000000..86270d0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,30 @@ + +{{/* +ruler fullname +*/}} +{{- define "cortex.rulerFullname" -}} +{{ include "cortex.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "cortex.rulerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "cortex.rulerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "cortex.rulerRulesDirName" -}} +rules-{{ . 
| replace "_" "-" | trimSuffix "-" }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml new file mode 100644 index 0000000..8448108 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ruler.enabled }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml new file mode 100644 index 0000000..a8e034d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml @@ -0,0 +1,191 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ruler.annotations | nindent 4 }} +spec: + replicas: {{ .Values.ruler.replicas }} + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ruler.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.rulerLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ruler.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ruler.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ruler.priorityClassName }} + priorityClassName: {{ .Values.ruler.priorityClassName }} + {{- end }} + {{- if .Values.ruler.securityContext.enabled }} + securityContext: {{- omit .Values.ruler.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ruler.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + {{- if .Values.ruler.sidecar.enabled }} + - name: {{ template "cortex.name" . }}-sc-rules + {{- if .Values.ruler.sidecar.image.sha }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}@sha256:{{ .Values.ruler.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.ruler.sidecar.imagePullPolicy }} + env: + {{- if .Values.ruler.sidecar.watchMethod }} + - name: METHOD + value: {{ .Values.ruler.sidecar.watchMethod }} + {{ end }} + - name: LABEL + value: "{{ .Values.ruler.sidecar.label }}" + {{- if .Values.ruler.sidecar.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.ruler.sidecar.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.ruler.sidecar.folder }}{{- with .Values.ruler.sidecar.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.ruler.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.ruler.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.ruler.sidecar.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.ruler.sidecar.searchNamespace }}" + {{- end }} + {{- if .Values.ruler.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.ruler.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.ruler.sidecar.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.ruler.sidecar.folderAnnotation }}" + {{- end }} + resources: + {{- toYaml .Values.ruler.sidecar.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.sidecar.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{- end }} + - name: rules + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ruler" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configs.enabled }} + - "-ruler.configs.url=http://{{ template "cortex.configsFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}" + {{- end }} + {{- if not .Values.config.ruler.alertmanager_url }} + {{- if .Values.config.ruler.enable_alertmanager_discovery }} + - "-ruler.alertmanager-url=http://_http-metrics._tcp.{{ template "cortex.name" . }}-alertmanager-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}/api/prom/alertmanager/" + {{- else }} + - "-ruler.alertmanager-url=http://{{ template "cortex.alertmanagerFullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}/api/prom/alertmanager/" + {{- end }} + {{- end }} + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ruler.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ruler.extraVolumeMounts }} + {{- toYaml .Values.ruler.extraVolumeMounts | nindent 12}} + {{- end }} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{ end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: /data + subPath: {{ .Values.ruler.persistentVolume.subPath }} + - name: tmp + mountPath: /rules + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + mountPath: /etc/cortex/rules/{{ $dir }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.ruler.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.ruler.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.ruler.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ruler.env }} + env: + {{- toYaml .Values.ruler.env | nindent 12 }} + {{- end }} + {{- if .Values.ruler.extraContainers }} + {{- toYaml .Values.ruler.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.ruler.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ruler.affinity | nindent 8 }} + tolerations: + {{- 
toYaml .Values.ruler.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: tmp + emptyDir: {} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + configMap: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + emptyDir: {} + {{- end }} + {{- if .Values.ruler.extraVolumes }} + {{- toYaml .Values.ruler.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml new file mode 100644 index 0000000..52fb3e0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ruler.replicas) 1) (.Values.ruler.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.ruler.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml new file mode 100644 index 0000000..de6744f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ruler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- if .Values.ruler.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ruler.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ruler.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ruler.serviceMonitor.interval }} + interval: {{ .Values.ruler.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ruler.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ruler.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ruler.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ruler.serviceMonitor.extraEndpointSpec }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml new file mode 100644 index 0000000..7752ef4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- with .Values.ruler.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ruler.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.rulerSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml new file mode 100644 index 0000000..2b30599 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml @@ -0,0 +1,18 @@ +{{- with .Values.runtimeconfigmap }} +{{- if .create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" $ }}-runtime-config + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.labels" $ | nindent 4 }} + {{- with .annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: + runtime_config.yaml: | + {{- tpl (toYaml .runtime_config) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml new file mode 100644 index 0000000..9194971 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.configsdb_postgresql.enabled .Values.configsdb_postgresql.auth.password -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }}-postgresql + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + postgresql-password: {{ .Values.configsdb_postgresql.auth.password | b64enc}} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret.yaml new file mode 100644 index 0000000..ff0e78f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if (and (not .Values.useExternalConfig) (not .Values.useConfigMap)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: {{ tpl (toYaml .Values.config) . 
| b64enc }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/serviceaccount.yaml new file mode 100644 index 0000000..963f866 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl new file mode 100644 index 0000000..3cca867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl @@ -0,0 +1,23 @@ + +{{/* +store-gateway fullname +*/}} +{{- define "cortex.storeGatewayFullname" -}} +{{ include "cortex.fullname" . }}-store-gateway +{{- end }} + +{{/* +store-gateway common labels +*/}} +{{- define "cortex.storeGatewayLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} + +{{/* +store-gateway selector labels +*/}} +{{- define "cortex.storeGatewaySelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: store-gateway +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml new file mode 100644 index 0000000..1019cc8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.store_gateway.replicas) 1) (.Values.store_gateway.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + {{- toYaml .Values.store_gateway.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml new file mode 100644 index 0000000..39eaeda --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.store_gateway.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + {{- if .Values.store_gateway.serviceMonitor.additionalLabels }} +{{ toYaml .Values.store_gateway.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.store_gateway.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.store_gateway.serviceMonitor.interval }} + interval: {{ .Values.store_gateway.serviceMonitor.interval }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.store_gateway.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.store_gateway.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 0000000..0238c75 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.storeGatewayFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.store_gateway.annotations | nindent 4 }} +spec: + replicas: {{ .Values.store_gateway.replicas }} + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.store_gateway.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-store-gateway-headless + {{- if .Values.store_gateway.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.store_gateway.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.store_gateway.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.store_gateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.store_gateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.store_gateway.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{- toYaml .Values.store_gateway.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.store_gateway.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.store_gateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.store_gateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.store_gateway.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.store_gateway.priorityClassName }} + priorityClassName: {{ .Values.store_gateway.priorityClassName }} + {{- end }} + {{- if .Values.store_gateway.securityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.store_gateway.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.store_gateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.store_gateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.store_gateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.store_gateway.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.store_gateway.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.store_gateway.extraVolumes }} + {{- toYaml .Values.store_gateway.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.store_gateway.extraContainers }} + {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} + {{- end }} + - name: store-gateway + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=store-gateway" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.store_gateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.store_gateway.extraVolumeMounts }} + {{- toYaml .Values.store_gateway.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.store_gateway.persistentVolume.subPath }} + subPath: {{ .Values.store_gateway.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.store_gateway.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.store_gateway.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.store_gateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.store_gateway.resources | nindent 12 }} + {{- if .Values.store_gateway.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.store_gateway.env }} + env: + {{- toYaml .Values.store_gateway.env | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 0000000..c56ec77 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,24 @@ +{{- if eq 
.Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 0000000..f58019b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,23 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml new file mode 100644 index 0000000..fc41461 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.fullname" . }}-memberlist + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + name: gossip + targetPort: gossip + selector: + {{- include "cortex.selectorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl new file mode 100644 index 0000000..4798c6d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl @@ -0,0 +1,23 @@ + +{{/* +table-manager fullname +*/}} +{{- define "cortex.tableManagerFullname" -}} +{{ include "cortex.fullname" . }}-table-manager +{{- end }} + +{{/* +table-manager common labels +*/}} +{{- define "cortex.tableManagerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: table-manager +{{- end }} + +{{/* +table-manager selector labels +*/}} +{{- define "cortex.tableManagerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: table-manager +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml new file mode 100644 index 0000000..d24dcc3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml @@ -0,0 +1,106 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.table_manager.annotations | nindent 4 }} +spec: + replicas: {{ .Values.table_manager.replicas }} + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.table_manager.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.tableManagerLabels" . | nindent 8 }} + {{- with .Values.table_manager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.table_manager.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.table_manager.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.table_manager.priorityClassName }} + priorityClassName: {{ .Values.table_manager.priorityClassName }} + {{- end }} + {{- if .Values.table_manager.securityContext.enabled }} + securityContext: {{- omit .Values.table_manager.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.table_manager.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: table-manager + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=table-manager" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.table_manager.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.table_manager.extraVolumeMounts }} + {{- toYaml .Values.table_manager.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.table_manager.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.table_manager.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.table_manager.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.table_manager.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.table_manager.resources | nindent 12 }} + {{- if .Values.table_manager.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.table_manager.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.env }} + env: + {{- toYaml .Values.table_manager.env | nindent 12 }} + {{- end }} + {{- 
if .Values.table_manager.extraContainers }} + {{- toYaml .Values.table_manager.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.table_manager.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.table_manager.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.table_manager.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.table_manager.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.table_manager.extraVolumes }} + {{- toYaml .Values.table_manager.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml new file mode 100644 index 0000000..91adabf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.table_manager.replicas) 1) (.Values.table_manager.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.table_manager.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml new file mode 100644 index 0000000..9748724 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.table_manager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- if .Values.table_manager.serviceMonitor.additionalLabels }} +{{ toYaml .Values.table_manager.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.table_manager.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.table_manager.serviceMonitor.interval }} + interval: {{ .Values.table_manager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.table_manager.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.table_manager.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.table_manager.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.table_manager.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml new file mode 100644 index 0000000..ff3c57d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml @@ -0,0 +1,23 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- with .Values.table_manager.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.table_manager.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/values.yaml new file mode 100644 index 0000000..4a0f8c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/cortex/values.yaml @@ -0,0 +1,1605 @@ +image: + #repository: quay.io/cortexproject/cortex + repository: 10.10.31.243:5000/cmoa3/cortex + # -- Allows you to override the cortex version in this chart. Use at your own risk. + #tag: "" + tag: v1.11.0 + pullPolicy: IfNotPresent + + # -- Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: [] + pullSecrets: + - regcred + + +# -- Kubernetes cluster DNS domain +clusterDomain: cluster.local + +tags: + # -- Set to true to enable block storage memcached caching + blocks-storage-memcached: false + +ingress: + enabled: false + ingressClass: + enabled: false + name: "nginx" + annotations: {} + hosts: + - host: chart-example.local + paths: + - / + tls: [] + +serviceAccount: + create: true + name: + annotations: {} + automountServiceAccountToken: true + +useConfigMap: false +useExternalConfig: false +externalConfigSecretName: 'secret-with-config.yaml' +externalConfigVersion: '0' + +config: + auth_enabled: false + api: + prometheus_http_prefix: '/prometheus' + # -- Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs + # which can benefit from compression. + response_compression_enabled: true + ingester: + walconfig: + wal_enabled: true + flush_on_shutdown_with_wal_enabled: true + recover_from_wal: true + lifecycler: + # -- We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. 
+ # It can take a while to have the full picture when using gossip + join_after: 10s + + # -- To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, + # after putting their own tokens into it. This is only useful when using gossip, since multiple + # ingesters joining at the same time can have conflicting tokens if they don't see each other yet. + observe_period: 10s + # -- Duration to sleep for before exiting, to ensure metrics are scraped. + final_sleep: 30s + num_tokens: 512 + ring: + # -- Ingester replication factor per default is 3 + replication_factor: 3 + kvstore: + store: "memberlist" + limits: + # -- Enforce that every sample has a metric name + enforce_metric_name: true + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_lookback: 0s + server: + http_listen_port: 8080 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 10485760 + grpc_server_max_send_msg_size: 10485760 + grpc_server_max_concurrent_streams: 10000 + ingester_client: + grpc_client_config: + max_recv_msg_size: 10485760 + max_send_msg_size: 10485760 + # -- See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config + storage: + engine: blocks + index_queries_cache_config: + memcached: + # -- How long keys stay in the memcache + expiration: 1h + memcached_client: + # -- Maximum time to wait before giving up on memcached requests. + timeout: 1s + blocks_storage: + # custume backend setting related to using s3 + backend: s3 + s3: + bucket_name: cortex-bucket + # -- The S3 bucket endpoint. It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format. 
+ endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + + tsdb: + dir: /data/tsdb + bucket_store: + sync_dir: /data/tsdb-sync + bucket_index: + enabled: true + # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config + store_gateway: + sharding_enabled: false + distributor: + # -- Distribute samples based on all labels, as opposed to solely by user and + # metric name. + shard_by_all_labels: true + pool: + health_check_ingesters: true + memberlist: + bind_port: 7946 + # -- the service name of the memberlist + # if using memberlist discovery + join_members: + - '{{ include "cortex.fullname" $ }}-memberlist' + querier: + active_query_tracker_dir: /data/active-query-tracker + # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all + # queries are sent to ingester. Ingesters by default have no data older than 12 hours, + # so we can safely set this 13 hours + query_ingesters_within: 9h + # -- The time after which a metric should be queried from storage and not just + # ingesters. + query_store_after: 7h + # -- Comma separated list of store-gateway addresses in DNS Service Discovery + # format. This option should is set automatically when using the blocks storage and the + # store-gateway sharding is disabled (when enabled, the store-gateway instances + # form a ring and addresses are picked from the ring). + # @default -- automatic + store_gateway_addresses: |- + {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}} + dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095 + {{- end }} + query_range: + split_queries_by_interval: 24h + align_queries_with_step: true + cache_results: true + results_cache: + cache: + memcached: + expiration: 1h + memcached_client: + timeout: 1s + ruler: + enable_alertmanager_discovery: false + # -- Enable the experimental ruler config api. 
+ alertmanager_url: 'http://alertmanager.imxc/alertmanager' + enable_api: true + # -- Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config + storage: {} + runtime_config: + file: /etc/cortex-runtime-config/runtime_config.yaml + alertmanager: + # -- Enable the experimental alertmanager config api. + enable_api: true + external_url: 'http://alertmanager.imxc/alertmanager' + #external_url: '/api/prom/alertmanager' + # -- Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config + storage: {} + frontend: + log_queries_longer_than: 10s + # S3 사용 관련 커스텀 설정 + alertmanager_storage: + s3: + bucket_name: cortex-alertmanager + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + ruler_storage: + s3: + bucket_name: cortex-ruler + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + +runtimeconfigmap: + # -- If true, a configmap for the `runtime_config` will be created. + # If false, the configmap _must_ exist already on the cluster or pods will fail to create. + create: true + annotations: {} + # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file + # 설정부 + runtime_config: {} +alertmanager: + enabled: true + replicas: 1 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful for using a persistent volume for storing silences between restarts. 
+ enabled: false + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log level (debug, info, warn, error) + extraArgs: {} + # -experimental.alertmanager.enable-api: "true" + # -alertmanager.web.external-url: /alertmanager + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + + persistentVolume: + # -- If true and alertmanager.statefulSet.enabled is true, + # Alertmanager will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Alertmanager data Persistent Volume Claim annotations + annotations: {} + + # -- Alertmanager data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Alertmanager data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Alertmanager data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Alertmanager data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # -- If not set then a PodDisruptionBudget will not be created + podDisruptionBudget: + maxUnavailable: 1 + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 60 + + # -- Init containers to be added to the cortex pod. + initContainers: [] + + # -- Additional containers to be added to the cortex pod. + extraContainers: [] + + # -- Additional volumes to the cortex pod. + extraVolumes: [] + + # -- Extra volume mounts that will be added to the cortex container + extraVolumeMounts: [] + + # -- Additional ports to the cortex services. Useful to expose extra container ports. 
+ extraPorts: [] + + # -- Extra env variables to pass to the cortex container + env: [] + + # -- Sidecars that collect the configmaps with specified label and stores the included files into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # -- skipTlsVerify Set to true to skip tls verification for kube api calls + skipTlsVerify: false + enableUniqueFilenames: false + enabled: false + label: cortex_alertmanager + watchMethod: null + labelValue: null + folder: /data + defaultFolderName: null + searchNamespace: null + folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +distributor: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: + -validation.max-label-names-per-series: "45" + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - distributor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the distributor pods.
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 60 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +ingester: + replicas: 3 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful when using WAL + enabled: true + # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details + podManagementPolicy: OrderedReady + + service: + annotations: {} + labels: {} + + serviceAccount: + name: + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - ingester + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 30 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details + policies: + - type: Pods + value: 1 + # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval + periodSeconds: 1800 + # -- uses metrics from the past 1h to make scaleDown decisions + stabilizationWindowSeconds: 3600 + scaleUp: + # -- This default scaleup policy allows adding 1 pod every 30 minutes. + # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + + lifecycle: + # -- The /shutdown preStop hook is recommended as part of the ingester + # scaledown process, but can be removed to optimize rolling restarts in + # instances that will never be scaled down or when using chunks storage + # with WAL disabled. 
+ # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down + preStop: + httpGet: + path: "/ingester/shutdown" + port: http-metrics + + persistentVolume: + # -- If true and ingester.statefulSet.enabled is true, + # Ingester will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: true + + # -- Ingester data Persistent Volume Claim annotations + annotations: {} + + # -- Ingester data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Ingester data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Ingester data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Ingester data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: exem-local-storage + + # -- Startup/liveness probes for ingesters are not recommended. + # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + startupProbe: {} + + # -- Startup/liveness probes for ingesters are not recommended. 
+ # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + livenessProbe: {} + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +ruler: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + #-ruler.configs.url: http://cortex-configs:8080 + #-ruler.alertmanager-url: http://cortex-alertmanager:8080 + -ruler.storage.type: configdb + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + # -- allow configuring rules via configmap. 
ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html + directories: {} + + # -- Sidecars that collect the configmaps with specified label and stores the included files into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + enabled: false + # -- label that the configmaps with rules are marked with + label: cortex_rules + watchMethod: null + # -- value of label that the configmaps with rules are set to + labelValue: null + # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) + folder: /tmp/rules + # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead + defaultFolderName: null + # -- If specified, the sidecar will search for rules config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
+ folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +querier: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - querier + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the querier pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +query_frontend: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - query-frontend + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +table_manager: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +configs: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + # -configs.database.migrations-dir: /migrations + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +nginx: + enabled: true + replicas: 2 + http_listen_port: 80 + config: + dnsResolver: coredns.kube-system.svc.cluster.local + # -- ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size: 20M + # -- arbitrary snippet to inject in the http { } section of the nginx config + httpSnippet: "" + # -- arbitrary snippet to inject in the top section of the nginx config + mainSnippet: "" + # -- arbitrary snippet to inject in the server { } section of the nginx config + serverSnippet: "" + setHeaders: {} + # -- (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config + auth_orgs: [] + # -- (optional) Name of basic auth secret. + # In order to use this option, a secret with htpasswd formatted contents at + # the key ".htpasswd" must exist. 
For example: + # + # apiVersion: v1 + # kind: Secret + # metadata: + # name: my-secret + # namespace: + # stringData: + # .htpasswd: | + # user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + # user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + # + # Please note that the use of basic auth will not identify organizations + # the way X-Scope-OrgID does. Thus, the use of basic auth alone will not + # prevent one tenant from viewing the metrics of another. To ensure tenants + # are scoped appropriately, explicitly set the `X-Scope-OrgID` header + # in the nginx config. Example + # setHeaders: + # X-Scope-OrgID: $remote_user + basicAuthSecretName: "" + + image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: 1.21 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: {} + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /healthz + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /healthz + port: http-metrics + readinessProbe: + httpGet: + path: /healthz + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: false + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 10 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the nginx pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + +store_gateway: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - store-gateway + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true Store-gateway will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Store-gateway data Persistent Volume Claim annotations + annotations: {} + + # -- Store-gateway data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Store-gateway data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Store-gateway data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Store-gateway data 
Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +compactor: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - compactor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true compactor will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- compactor data Persistent Volume Claim annotations + annotations: {} + + # -- compactor data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # compactor data Persistent Volume size + size: 2Gi + + # -- Subdirectory of compactor data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- compactor data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +# -- chunk caching for legacy chunk storage engine +memcached: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index read caching for legacy chunk storage engine +memcached-index-read: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index write caching for legacy chunk storage engine +memcached-index-write: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-frontend: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-index: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-metadata: + # enabled/disabled via the tags.blocks-storage-memcached boolean + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +configsdb_postgresql: + enabled: true + uri: postgres://admin@postgres/configs?sslmode=disable + auth: + password: eorbahrhkswp + existing_secret: + name: + key: diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..be38643 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.6.0 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.6.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml new file mode 100644 index 0000000..2631417 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch-headless + labels: + app: elasticsearch +spec: + clusterIP: None + selector: + app: elasticsearch + ports: + - 
name: transport + port: 9300 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/2.service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/2.service.yaml new file mode 100644 index 0000000..505cc5a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/2.service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch + labels: + app: elasticsearch +spec: + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 +# nodePort: 30200 +# type: NodePort + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml new file mode 100644 index 0000000..ee0a42d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: imxc + name: elasticsearch-config + labels: + app: elasticsearch +data: +# discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch", "elasticsearch-2.elasticsearch"] +# cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1", "elasticsearch-2"] +# ES_JAVA_OPTS: -Xms8g -Xmx8g + elasticsearch.yml: | + cluster.name: imxc-elasticsearch-cluster + network.host: ${POD_NAME} + discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch"] + cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1"] + xpack.ml.enabled: false + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.client_authentication: required + 
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 + xpack.security.transport.filter.enabled: true + xpack.security.transport.filter.allow: _all + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.keystore.path: http.p12 + node.ml: false + cluster.routing.rebalance.enable: "all" + cluster.routing.allocation.allow_rebalance: "indices_all_active" + cluster.routing.allocation.cluster_concurrent_rebalance: 2 + cluster.routing.allocation.balance.shard: 0.3 + cluster.routing.allocation.balance.index: 0.7 + cluster.routing.allocation.balance.threshold: 1 + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: "85%" + cluster.routing.allocation.disk.watermark.high: "90%" + cluster.routing.allocation.disk.watermark.flood_stage: "95%" + thread_pool.write.queue_size: 1000 + thread_pool.write.size: 2 + ES_JAVA_OPTS: -Xms8g -Xmx8g diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml new file mode 100644 index 0000000..5a53f57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-0 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-1 + 
labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-elasticsearch-cluster-2 +# labels: +# type: local +# app: elasticsearch +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.ELASTICSEARCH_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: elasticsearch-storage +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: +# - {{ .Values.global.ELASTICSEARCH_HOST3 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml new file mode 100644 index 0000000..a4ae2db --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml @@ -0,0 +1,53 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + 
type: local + app: elasticsearch +--- +#kind: PersistentVolumeClaim +#apiVersion: v1 +#metadata: +# namespace: imxc +# name: elasticsearch-data-elasticsearch-2 +#spec: +# accessModes: +# - ReadWriteOnce +# volumeMode: Filesystem +# resources: +# requests: +# storage: 30Gi +# storageClassName: elasticsearch-storage +# selector: +# matchLabels: +# type: local +# app: elasticsearch \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml new file mode 100644 index 0000000..2cbd4b8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml @@ -0,0 +1,146 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta1 +{{- end }} +kind: StatefulSet +metadata: + namespace: imxc + name: elasticsearch +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: elasticsearch +{{- end }} + serviceName: elasticsearch + replicas: 2 #3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: elasticsearch + spec: + securityContext: + fsGroup: 1000 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - elasticsearch + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: elastic-node + operator: In + values: + - "true" + initContainers: + - name: init-sysctl + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + #command: ["sysctl", "-w", "vm.max_map_count=262144"] + command: 
["/bin/sh", "-c"] + args: ["sysctl -w vm.max_map_count=262144; chown -R 1000:1000 /usr/share/elasticsearch/data"] + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + containers: + - name: elasticsearch + resources: + requests: + cpu: 1000m + memory: 16000Mi #32000Mi + limits: + cpu: 2000m + memory: 16000Mi #32000Mi + securityContext: + privileged: true + runAsUser: 1000 + capabilities: + add: + - IPC_LOCK + - SYS_RESOURCE + image: {{ .Values.global.IMXC_IN_REGISTRY }}/elasticsearch:{{ .Values.global.ELASTICSEARCH_VERSION }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ES_JAVA_OPTS + valueFrom: + configMapKeyRef: + name: elasticsearch-config + key: ES_JAVA_OPTS + # log4j patch + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: ELASTIC_USERNAME + value: {{ .Values.global.CMOA_ES_ID }} + - name: ELASTIC_PASSWORD + value: {{ .Values.global.CMOA_ES_PW }} + readinessProbe: + httpGet: + scheme: HTTPS + path: /_cluster/health?local=true + port: 9200 + httpHeaders: + - name: Authorization + # encode base64 by elastic:elastic + value: Basic ZWxhc3RpYzplbGFzdGlj + initialDelaySeconds: 5 + ports: + - containerPort: 9200 + name: es-http + - containerPort: 9300 + name: es-transport + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + - name: elasticsearch-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-cert-certificate + mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12 + subPath: elastic-certificates.p12 + - name: es-cert-ca + mountPath: /usr/share/elasticsearch/config/elastic-stack-ca.p12 + subPath: elastic-stack-ca.p12 + - name: es-cert-http + mountPath: /usr/share/elasticsearch/config/http.p12 + subPath: http.p12 + volumes: + - name: elasticsearch-config + configMap: + name: elasticsearch-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml + - name: 
es-cert-certificate + secret: + secretName: es-cert + - name: es-cert-ca + secret: + secretName: es-cert + - name: es-cert-http + secret: + secretName: es-cert + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: elasticsearch-storage + resources: + requests: + storage: 10Gi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml new file mode 100644 index 0000000..2a24b92 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRrCrEWs79GCUPrYkFrkDBEF9uz4gIDAMNQBIIEyJUjcP339Anee6bdJls469HbsqYGgzidG41xto7ignNdZdP9LTTca+w8sN8tbVnTUZi4kQYcPSQqv+cWobi66KpgvQ7HhA/YE9K5L7wR7KEj0o61LYvucHm19hRdt788EvBy4mi8cDAr3m49NNuLUM6wyeCEKr2W2dwZFIyxFTPVv6/ef6cuHyDNLXJtjUmOIzNDL8Olqk8JGAd9bwXlizcShfmbiHHX8pAhK0u9JThFQePvCGiKA4LwzeuuwuEniznMlUQ4T/TjLjLLYcoS4vktfOJKPOgL3esjsc5hPoVgbw+ZpNCxRq1RVs/5eOBkxzXhJ7hdNELJDcMjitBfl71MlSDtMV4FhlVuhjilsuHx6URucsEE2l1V3asg4QP1PoSiACqncr2WhCcrKu0d8DztlIkCYG7D8oiAx4nEzsm0xmOhIcigHw6GP4MNeCieJCgAwLkJf1m73IYcxyaKsJAc57jfs9ue62KkVHL2NxNRjTps2j0Cl5NJQRE4CTkieU0etsNS1nJEwiJunVTyHXAa53MF6j40awEqs2Ko4gQENPpuQc599yJb+ZTHfHPe8bpfrmnxiEAaeiABu+OVH9bdLK5gtCyD5vXGZKVtHbyR+0+UlBggw/horFQIP+x7SKO53+ho0iCnYyQK52kJiv93JNgStGHpxf1SkPTtWHOraR2qSZTX6F7vjBtIq3Y6ocb6yo/jMNhzk3spHdz+F99S6uV3NLmDfX2vJmu1YSaPwaNZGDggcFI/g2S5ylBWyHpk2rB5gtklUIQEWxFFvbFOp37ffcdC0mZ6SgpOxj+IxuVLqTvyDLjrfteEvfjRAFXsT8E4XikC8QKjQ+KAwDYETidOiYB0/ByCh7t1KbcKJWU8XYxqzukX88CyVtO9Lp/f97x3ycvaF1UfzLBrm/bnTa0jPEP2/OdzpbjQJcEGX64+QY92k38zjPe4tedUz5H/C9aw8Q8r/DSxUhn2sdDXssR9jytITLLOJHDJX7XCfZxtoW60bwRm5
MyXc4bJmjZT2BgxTWIVokaOhk0IZwpbC/oxh1QkaHBioP6+slASXg8Xu9l+mACevb1b9RvpN+fhurW2wOHl4Kul775BCohuTtiqKAce8KEACwncwYz+ZfcPTkbLRy6+p6NI3zNWpZE+iFlPtLh+2+T/QQHEfKTNUxcXLt8WCMOZuCe776T41nY8UhbUQJKqlEvom3MzCcsvFBoahlpjv+rg9/Ay7ESMil49e2x3qbD2929X0BHz//RcvPO5fvSEK/tC2uHzWzqHf0ZaRwtO19Z95Uv3GjGNF0SO8qri830LfJ+ctjk320qLyZmxA9QgPoI2oMHSxkaX1fgVeiN9coBM8yJbPK8ZdOOg4abnYOhqrTJXaoSFo+SYyAVZoTiQIIk/JScL5Qcw9IJw6sSKmOdChy2spYQKeo1NU9ecLD8YRBqRP0EET7e7NDPKlIWQ1vB5y2hokyL7bxvbGgzqQBAyo9wKJ3v1g4IYEWA9mluvQapOMVEHBYh6wv2nTJpE9EqMxpYQBU1w+vgX0EUgZDEOBkbvd5wubAeERt0mJqjea6vxWJIbeqMVIIoJSZEDaPE5qVNYaosoc8yvAZ9+U3lZlZObHzHEAIUx/2pP/jFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE4MTk0NzgwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFP43u2ii0k7JTUfInMhUBwjWZrS/AgMAw1CAggdItHB4SBc5KdDVc8eXuF8Ex1WP/Y2wz76PoNNpYm2LeIVozsp5c/2RDN2KqhcvhTihlY44esqWWVCOx+OTwmAPFwzZSrMaOYpcOP3fRWaHJLw98cK8a1ZuNv3eXWecf333TrsvU/bpT3v0KNO915qnSbtNwlvXaOMm6jbw6eBnkB7i6jxA7kgVAW6soa3ZHOrV78quBSbAjXZddHsI8x3MS4rxdvkp6GHet22/fQxjxz8UlQEDqzQgK7F4RqULRJeU//JID7VJqfbHRHfnYsKszsirhWKeJsxLVhG1VU/zRgxs0C35NfQeR/o7jmFpE7CCvvC0Rea2pybNojb51HLvyycXtpGn0gAdTBVNnwK1X58uSDWH7jM61uX9f+/gcDZqlUj6UVc6mzqxAgzDtf6B32G0VQq2szaJjbRVEVXhCAOIdVj6pRpI3l3gRv8OkNAWsGwYDMjeFxnrEpw1AQkEj7FRgI6iNOxEfUhOVYIEsflGTUdcd+K+zlCCHAJoMzbqiwPyHHgvLOp04A7fog+H3/cn6Tdmrp/J7TxpaW1ZwwcHtTRLoq0F77Sj8XJule3CzaDtg6IBen/Yo7H9hhK3ORodlGjJYA285dHAd1mtqmHmoWeDNoVrlVyymge78yXGmlFsBWF83VUChRx+9noF3Zhz+QMPBNsKHk4TM9yRHiWpMZIdkEZKq+obCPU2PmC21wnWx13nhb88gaNyBjHxFsGE91SgEyQh/cPhi01Y7+yNYQvYOXJe3EQ6oqFCBkPUnrbAMiHDP//AVN/tUrgVbmpIclfFprP2YIRcfGa7qch48RFbmhnX5N/OYLaPnNYdbxOiwZ0f/KIpDKWS67kS2N+jDKWs/SCLs2g89q1z2EGvbVwKMD6Vl559EZxAfNRv+eZu0MvTejEkuykIHJpXCyP+8EphUyWW9Cqll1ux4rXMUDkgl5sh1WgSoIEASX2j5TJ3fIh0nBkjAkBi0n2BINZgVWKj9U1zHNdRF67Eb+97lUuY6JIkbFhLSgZiIZqnI9bnW8OKUJFtvVtlSKG4xqdOeAroB8GLw2iR/GjF2Dvy4rIZo+qeTCIN+bm+iFkCri7L2K0/KR25h7bAtXwBxwMct5F4A1vltlLs408efMRJ7dg3iqMGhRyXdwxKexWJLbp02uJQVU9/ogYeLfSiI
ZEm25qjEMQZqRpQpwLaH5JB9oLKqdLEdeuxOfqb6weHDOtITlFHToeRNzIEmbiT9gbdpMwKTxs/rtwMHgGU6kIJmIFgnw2gauKvpiIuDCY79JpSNipsicvvLTIa4cc8sZCCllZ1wAmbNDsCH6p0bh8CooMjGf2vUbRClSe9+R19/lRMFGSp4N6fElW7MxNw85xpkFjG0s053fvIJmfPhxVqUHMP3fFQv0DUvvQNvNTsRGdDjohkC0095v9EWy7n9Frv2wIM2G7uVHvrlgkQfPK2JsYZKsUE0KXa4HUQptWL71kp7RQSmOmXFzsthjYVXu/pfXA+u+PAtHvQpo1nTPreXn3UZqiEiQmNkmMPLAYzpIi35tjNewfw5XwDj77pqH5OFcMZDTKbiInV1LuvFlKxCEYh4gvTThC0XTsrsiHgldtNcw9ZB017uPW9AAqbj2IB0d5b0ZB3yMZ67uzt1pretcxmEfSoA64QWOC9lBYp4DVE9QxcCnsSgibWreqpdJHmX5MR4umwIb6WaM1pJdCY1bW4tO3ZVT4DA/4ry7jqxUH4AcZRNK0zYR6DAtZndB7LTJhT+8d5EBtmAHzC5HT9KLmHV6mAG1QLMlwhNXmtM0YCJsKxcZo+xLBy/2cHl41EU4ACiuEq1JrM5j9fQk+hmJHT+JB0aqv+kvdxGmgBuVWGHQBtNTV6TYeLzqzDpIl9uXi3qFKFBuTQOska2zAMv7gLOe79w1cVb/SJKdcYjWtLR0v6wfaRgVeBwLvTvh7nNXhXRqKfQKe3e2Tjgq4nV4kOQHI21WDKGSd4ONyyvXGMwNzRgcZwpDFAcvshZATwaBtAo4JWi6D3vJB6H1PHRtyqHjErKkPazoZMjR2sZI8S4BMo4R5fa1ZztZO4p2lJYUIAQHj872UdGXHTXgyZKU8t/ifiVfxon5UtZJRi0Xq5OMdN//Qtq2kVwQxntf0eWsygkKMtNr1XLzu0TAMUMItnohdQWUw5w8UeXYOAYfZFqZEhKfcwkJsfq1q56ptzVBI3T2hDFM7xuVFNn5y+FCTx9pB9FCbln/3ZlKuUiTH/eLMKdQYGkRX4X0qzkx3YqAn6jDLQPEG3Rz0JP53T43uLxGpqa8+jn1XIUCNj50mqZGiah7bdo1qsDHbFWYCe7uoOjPapontpaoEQaZog1INqBNerS19a+i4S0/uAsGApykwUhk/zGfr9UudpKJWd7AznlF3+yfZfk/9mCSajBpoWafCIWmOvxJD77L86YAs9STuhWUGQvL2rxPf2uyS4WAi2+DgbdrGTSiwNB/1YX8iHp/cw6DA+MCEwCQYFKw4DAhoFAAQUSvLiFrAQlmfgL3Cewez5Fw2+0okEFH+RyXvcJHVaYbaqjejrXkgUS0JsAgMBhqA= + elastic-stack-ca.p12: 
MIIJ2wIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCBWEGCSqGSIb3DQEHAaCCBVIEggVOMIIFSjCCBUYGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBTQSr5nf5M77CSAHwj38PF//hiFVgIDAMNQBIIEyBrOipz1FxDRF9VG/4bMmue7Dt+Qm37ySQ/ZfV3hFTg6xwjEcHje6hvhzQtFeWppCvd4+7U/MG8G5xL0vfV5GzX1RhVlpgYRfClqMZo3URqBNu6Y5t3sum+X37zbXQ1GI6wo3YURStZkDHlVtObZB667qqj5rO4fIajzRalaxTFda8aS2xAmQklMcCEXASsO5j0+ufVKiOiG2SIEV2LjjYlUymP7d9+LAZ2I6vR+k/jo2oNoPeq0v68qFd9aOB2ojI9Q/PDFA7Nj1kKMK7KjpxGN5/Ocfr8qrxF1mviA6rPdl8GV3WCFMFKcJER4fRmskWGNE/AdwU3laXvJux/qz4rjiYoJX+5rSyXBDxdznaFiSyN1LYkFJ+nao6HSAmPPyfEPVPRICc6XHMUM4BZOVlJO49M1xg7NFQUtkyVm8+ooDwXCiGEUHDZNw+hCcuUewp0ZXki695D0tESnzi3BE56w7CRySeaNR8psAtL74IUtov9I66GlBEI7HSbyLTT9Fa7+o+ElJWnFqIyW8WzNF3T5fvRv2LfKjYO5KiISlOM03KlETWE1F60TZqW3EbP9WjLhRnovFcJVsNyha+wDVTu44DAylMX4Oh2xKYm2YW+Oi0aeCFmJbDp/TlxYhm5ACYUxma6CVxbEgHkxwjWyFfiNQp2MBL/5HFJGxuny2lVnN8yUSCvDdnOlVTB36/EByY/oA8S+GF/QRYd3PMew56s7aBgPt8mhncN5Cdm+GCD/Nb/ibcuTId9HAaT6o3wMsc7bYusjHGCjFbz9fEdU2MdpLJO+FXVM9E1sEKoTpPLeJDh2a9RUWJQPUCLu8MgEdiJohtEpOtvM7y5+XbuAkYaDsBw3ym5M/kwovN09X1m5x5qM0QSRIVKHf1qo6wo68VMeVQDEBNxJ5/tuZ11qE3siGRfwDnUkCpb9H54+w3zaScPHGAdwplYYwaqnFMwi8nFMtjZvGOLT2wqPLPnKVeQGt4TCVWPXuB4kYnmbTWoJbUT5Wpurcnyn8l6uzLmypCD4k8YiQoDb1b9HIFUAypn580KIUF19eCSGeIHl4hbmusuISxQ1qXk7Ijbj7PiVtMKy5h8rG/c57KJvfvnMQy9hauM5kcZmlTUvrHDw+7cUFB96/wXbvqmcPKGKutgXRqHcTYyBOPEJnSUMBIM2r59wgFjlMuQLrJurzwzox/IEKu/KMilIBDp4k+MHz6NrINWfbV7xa6yAja1kWyvUmwYjCHhlXZmhCb2fmhP1lsnN4BNAkDsdfxHBRCBISy6fuHSY+c4RsokxZ4RomHhVvJsEY/AE4DCvVXDunY8t4ARrQCqXYso3+kVjm6+aelKk+KgyLZ3St0eAIl/Y2xqEXgh0wHGrx3CLZqGqq864f5MmrxiytmlSzHP4RSad20drsN3VchaJZkyrGbKEs6ZJDU2dq5NiC5unqx5tLw6XNRTydIC2PaiVl9m3GLUCh6hQSRJnvcXrqOd8a9K1uV5OoA3TRdc2V5lyxWRIJsdK5KfiAiTsNeM+Tt+Dh2pZjt2l2h4n4BjgYApxG8u10BP1iZ1e1OsCRgLGbgiuXtXrlrjwvJzrB5i11oy9mt3vqgtbjAciQpsQYGGfnVqyGXfEc55hIYWClNAFZDE4MBMGCSqGSIb3DQEJFDEGHgQAYwBhMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE3OTU1MTUwggQUBgkqhkiG9w0BBwagggQFMIIEAQIBADCCA/oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFEVjuzIvhFF9BzWGr3Ee4cw/mLcqAgMAw1CA
ggPAwroH+zLRt2Jtb8IWeOaIbXAv4sVGUljreWkJE8dkoXNcEQpATEt5H7L4uwnDsevLi1yfWtUDN1OxM8gb7iR4Jysrd+8uM1r0nn9YStz/I3qhN9Fb6yAb+ENTCzwo/oAnyDBM/lXR9fL0EPHRfsDmK+6kC+hZ4AZIao+1oWRD0Bu970yK6gwv7TIRCsS/RBZfC/d4Slz1+IQChiWS4ttTzxK/IuhaFbia0JYtUpjmMGMBQwYRyvITgYpOIct39Il/mabQ4BA1/wk7Oecfe3RHzIfM49AxJtwKppfVfaRJjtK1aoO/GKS6CZuvIIX8q3Mt32OEaoRN9FJM9EkUkKCcYhtRfq0/8MTO97MbrcKeO8XICn8vZwOMM7k7IFtCq44/3QBXa9fpc2BFMVYOoQ22W2ZuMNMRp6OYc6Da1BG4Ik9mt1T4k9NkvfrhpNceR27v6Q0pZNUTN26aPr11/SfS/IZmLGXF7cGAfxITMOQwK2ig6qivXzvwLxfnyW4aHF7K/jL59kDg9Vf9zKmlvPJpHSEWv53U9SFYvvrMISd6E8np0bHRM5p49mgH/KXGauRRaLWUxlBwrhjeZRimTF9x//a0luGf5tIW8ymi32wn8LNiu7fbnkldnivfgWVmktNrPMH+70HNlCWkfaNibSHpzyDQRTzg9PjHEcFH+pQAXCc+A8y8FSvlT+nx9dpXXRK5pqbrGnWyrm5D3oY1ceO0E85R9Fx4Ss0f+mMBtNDYpz7zS5BSX36MNn0gm6MkhlOVbbcAob4WbZAEM7zaiV1ilLegXPZYPCGQydN02Q+lJ7HHZ18T4mzTrjF6M1PFIx31cR1r0ZtJhkCrOWdlTrmovvYYEgEStsiE3pi6dW4v1NgcJVevpnJJ//vpGXasH9Ue/ZNdk1tj/h7cQ/qbKlmvrcuH/UQ969RsNX+K3B1xeYnfbV88BXqFLuqhuWy38wwvBvKO37vq+ioPNIjwaIyCVzoF9/MAx2aNOdk/x04mSNVYh5q0ZKv+3JC3W2vJxV2aonc/ybFgi2GZz2erVYNZTSXz+bEefx8QWzcW6/zr437jh/peQRyQ92PsN+eZV9GB2lrwmF7K2579vNQoVcpzTvTFf+eZZhF8u/1HZW4uFHRUyqE3rHyOukSFukD7XWnFL1yUcWw/SGNIm1HNZD3nXjqcwdAIXl7OvqdO0z/Qt2bny6KpOSJqjMUjB5AX5/yt2xlZBDhlsoGtRfbSWefGf7qTdpg2T9+ClMb7vS1dLzrGRzNgGc7KO2IQdkNcfj+1MD4wITAJBgUrDgMCGgUABBSoZ3hv7XnZag72Gq3IDQUfHtup5gQUHZH4AQTUUCeOS0WnPOdFYNvm1KUCAwGGoA== + http.p12: 
MIINZwIBAzCCDSAGCSqGSIb3DQEHAaCCDREEgg0NMIINCTCCBWUGCSqGSIb3DQEHAaCCBVYEggVSMIIFTjCCBUoGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRl7KAO2Y5ZolA3Si0i+pNdXpn42AIDAMNQBIIEyE9fBFRMMy358/KJQcAD9Ts0Xs0TR0UEl/an+IaNTz/9doU6Es6P22roJUK8j4l09I8ptGGKYdeGzrVBzWEjPhGAZ3EXZPHi2Sr/QKbaiWUnYvqqbPVoWNLukrPvK5NpEyPO2ulfxXN46wHzQMnk5l+BjR4wzqKquxgSzacXRJCqznVj59shjLoTK9FtJ3KVEl+JfukcAh/3EqkP7PRAXrPeQ5UcvYbYMZgxw8xHYg/sdKqyHBxwQqNtvGlfGHQ6jyb4/CS2vu0ZehGHQoMgmry2pvNMjA9ypSVWRGspcrdcQOJNgYtHmBiBScoURLB+9KJX2ivY8zJFI5e8Hb48sLASkp4HQemBWMQTukSnlgddsAtIKgpoRZWpcJ7PunHuWXAKZPCMH6uF14G71/lhluRjjy5GEnkKhKkKnlX15kmLmylTZJVdMbMRnsGK7exsVS8ot7sYJ9EMIvKJUqKf/RmZvUxZqlGp1oy3Uo5JgBU5MF61wnkad+L1UJsB2ZzPV0S/jYKPFVzBsWXj9IH74D02TcQz774+FQqAXlVLlpglmlnMwOU3IboKOH2Z4LIj7Kx7wfZZMi3/sQbYJM2PWCd8OS/keDf53ZwMKNxWPh1ZB7kX4mqhmMHdNgRblcWXP3LtWKck31Vq1UdGfK4/T/nudD1ve15NPUP1DvcVsDOWnRF4s3IDXZwXWqvag+hz0zVyB/T0X1XkqrPtBNX/o5qeTDP30W2GVdGL6SIlgZHaqqNuamHlhGra43ExKTwRPBsskTrziC2fb/JeqXxJBES/YufiomXw14BnQUpyBfVeV3cDDEZUnfu7lJz19jS+2aTtA6v9Qnps+q0rNnLa54JLf9bWlw4RomSWcJCqkkW/EG0AdTKrqNFYPZVZTLvt+4B8ehWrUWas8MK5jAXeTklr0ao5acGOKWip1wmqIRKRAIT2OBbs9jCmigb2xJNDK4RdUtDYsJeltJ69DvnG7bmTLjfsOQcVIaI40k91N8nnda9+/6BdKFDQtMDB6efGkciWp9ce24uGUzKszD7CmKTlCJiqn/V2bbOKGdk4Tafy4B2HzeaX+fMFjpWu01UMaJJrvYbAnXww1Yg2IjbwdAMTv7z8zPIJ0a+drouylUfvKKeun6BnLe0fR+XbRRs77Rengb30c1plozEFHZjzmQ10uVQSh1wWURJnVSru6b1pyVI+KR3WZHB4vgDx+BDlQjxCk53+Hxm5wv8SgpvNxVkepPVF8ucut9FkGNHov1gyatlEKSzYlrFt0mFQWg20rKMrkB6pEDO8f5W2InR3znO15NTbw/l3BXYGOe1lS0tHljc5zJkmMTdVrJnFEd2RqNPNmFWEn+1bm4NeAr6QEY9fiyBCMWBHEELTfHtu4iS37D1cBEKudpCszaWJiPgEeDu75+IuXa/guZdxWJj/ktDfZQJpp9ork2QScgu31l7QdGfC24C2E6kQp4UHZ3k7wXSTUt61bdmK7BHqjiz3HuP76phzd7nZxwLCpEg8fhtwhNgPx3IrU1B4JX40Wzsy1Tz/8oIcvjykDmI967chWtw/WSschamGBelNt+TV1gVKoLlMpL9QxFcAqXhEC6Nr9nXRZRJAIRun3Vj+EabZoR2YsdghDE9boTE8MBcGCSqGSIb3DQEJFDEKHggAaAB0AHQAcDAhBgkqhkiG9w0BCRUxFAQSVGltZSAxNjUzOTcyMDczODY4MIIHnAYJKoZIhvcNAQcGoIIHjTCCB4kCAQAwggeCBgkqhkiG9w0BBwEwKQYKKoZIhvcNAQwBBjAbBBRmhTM5a6OsdDd4LLR/07U/28/dqgID
AMNQgIIHSCCLUDdxl9rcX65CAYiQD1mrnoDJe+c8hWww8KI+RD1/3U8skUZ+NHjf2cjCrDQdtVZcycc37lkJ4HEU0keMdVE7I9tja81EfQclnZAUgx/zzLQqVV9qc1AcKX0pzUczLewoQZdXQHdpXh0u8Hf4xFeYM3EAGxB0mUYGwZXWSxYSdaHmxTgeftqNHF6tudt0vpPgq9Rbqp7zP8z48VUOSUkbNTXZOgNVpMgs/yKivvURdWBwJMkpOs/daeR+QbOLkhrhTtT8FjwFUlpnQ//8i7UsBBJKcEKvlrfBEDWcIGw8M6oAssoPsCGyXnsP7ZCVBDBgv941mBTJ9Z9vMoKPpr9jZzSVJrU2+DDuxkfSy1KL0vUvZm5PGSiZA72OpRZkNi8ZUbJTRKf71R+hsCtX/ZUQtMlGCX50XUEQl44cvyX32XQb2VlyGvWu0rqgEVS+QZbuWJoZBZAedhzHvnfGiIsnn2PhRyKBvALyGcWAgK0XvC26WF676g2oMk8sjBrp8saPDvMXj06XmD6746i5KC52gLiRAcwlT4zJoA0OB5jYgxXv+/GP9iXNIK578cCGpBes28b7R+hLDBCc/fMv1jMhKWPVXWJZ6VkcpUgH73uxFl43guTZzJfHI1kMF1+PbOviWPdlSj1D44ajloMJP5FXubIfYEIqV19BdU42ZXZ8ISIZYTAj9OhNCUkkTjjGH2VhFz/FjZDxdk9m/Sw+du8dg1v0+6XIMScjuutbLxxol8Dx1yfRSgZZGN+D3vi0hW1OgcpnUhVI/x48LjdWm1IA0XWOzFiJAe98BiL0roTsUk0pgyujzvLcwDFGP9hnQ0YLdCy22UsQ39hRyQzwGAVO8O49bU8sgNy75+4++8Z3pqI91hdoHyzNMSx6fJn/Qd6UcAdTF0divh17q5bZi+x3D7AQEvh5NwePD0HIqBZexT0yNTVTHragJZUetI5FZgE1cZrfchckP/Ub5jdn3e/Cvu8J/yZFAM8glJvO1D+4BZ+/MVAw3AkO7kLhGeXMXr9s9+A/uPlznoC6b9bpjj3X46bFz7dPIYC0aeya87vISA0/5VPkkUZ+U6A9nLkCIcl5XQElMjrzidFJyBmtxHXLrAu5yiWorl3KVOf9QOrKrZt1UrNihIaSIq/46jI5yBQX6LV7fUBrZKe/oMbuf6W0LliNJbKSwZi0RRHo0jBPotUiOsn1qmnh+hZp6rwi1KGOsCAPSMSGnURwoXAdTUmAyPriDjDBKjm2EiDZJ9T3XgNDHVU24SqKjsSoByrD4FcVyqFAl3w0CaSNXloZswE0UqGKoQUy6Up0ceWoeHYfA/FJyaGfkFGRkmYun+wUJZvhpoLv6bn377CziWTSc0o3nl+UZ4pTsRJOlG0FOxzWApjSd8bPIdezPxak2DM0qj6aiUocfEBMLnFn4Sjj1vVFmIGPNXiOPlJF0Ef99I5Gno3YAd4ZHBqpkeUq7+bWur+xhv5zsXs5ARK6TVOVqlMPiKRpDX7lEQoya++U6HIj6zb7arSZivM5YrZeqHFKK4gpORvpg6icApQCBniDgmNxZJFobgzvIwKTABJjoivHs4zIIw6TCjbz38GEFdzbsUuCXQo3tFWaxgiGkxtLnjYr0PTIxFdBfQ5dkRkkxLvUg7uR1uP9IcmO/8QzzyLeSA+I+teZME8QCzui6CY/lhIfjxJimawejCJx33nS9uXNibQ0my41SmXRDGVgiH6el8veIbEHU9RY+elVR6eqlemCuIHfU8QNPNbe7Gzqaaoccd2VUY3PXNHxU87DC7Nttvn99Ow5zxZ8xZUQVfLFntS9d2hgKp8gJ9lgVKzEuYCiL59wuxbNtnAb8mET0Buw24JeQew9e8DdYL2vDLhQz+IqPXKAhlf7BSpPyQTOeaba657CNmkzdiNk3RHGeTRrq4c3/nl1M+ZsPwf8WxoTcmu+W0Y7/j9nps8r+fKlNB23hOEIWZ4KN+Y4qZRKltTARhqmdjLI
hUtWh4D49eTe5sS3MqzsZJJwsEHPPOvZKvOG5UU3jXMg9R4F8CaYgx/M4ClwIIlHvcdW7R7sXke9E/qccIG3jQ5b/mgHCk3pVkAyrRWfBZqXxlfWn+cfzVALtUXWePwhN8+i3CQbjLLOgE6yH3/rBfXQQVYHwrZqoyFchDwlFF5FtF5GThnj04kvhZbq0EcF4lbiULAOiBkJong4Op287QYgq4W8szOn9F2m/4M2XNaI3X7w67GADFHs5TtPXjWx1l6kKIwMM2pcpltXblqgH087payQHx1LnCpztxcxmeoFb3owvwKWmQpV0Gh6CIKfa7hqwCsNggOcKEQWwRJtADEXzPhRYG0mPelWLQMdLLaEzUqh9HElXu3awKazlHa1HkV0nywgldm23DPCKj5Fi6hux7vl7vt8K0Q4KA8Xoys4Pw43eRi9puQM3jOJgxX8Q/MsABHHxPBa94bOsRLFUa/Td70xbHpOrCCp64M7cm6kDKAwPjAhMAkGBSsOAwIaBQAEFEi1rtKgyohIpB9yF4t2L1CpwF+ABBSDiyukmk2pIV5XfqW5AtbEC9LvtQIDAYag +kind: Secret +metadata: + creationTimestamp: null + name: es-cert + namespace: imxc diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml new file mode 100644 index 0000000..d2bff8e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..7b0bd6d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/elasticsearch/values.yaml @@ -0,0 +1,68 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/Chart.yaml new file mode 100644 index 0000000..61a7b7f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-manager +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml new file mode 100644 index 0000000..b20900d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-manager + namespace: imxc +spec: + type: NodePort + ports: + - protocol: TCP + port: 80 + nodePort : 32090 + targetPort: 80 + selector: + app: kafka-manager diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml new file mode 100644 index 0000000..4edcf32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-manager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-manager + template: + metadata: + labels: + app: kafka-manager + spec: + containers: + - name: 
kafka-manager + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-manager:{{ .Values.global.KAFKA_MANAGER_VERSION }} + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 200m + memory: 1000Mi + ports: + - containerPort: 80 + env: + - name: ZK_HOSTS + value: zookeeper:2181 + command: + - ./bin/kafka-manager + - -Dhttp.port=80 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/values.yaml new file mode 100644 index 0000000..b5532cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka-manager/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/1.broker-config.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/1.broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/1.broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? 
-ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? 
-eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + 
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + 
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + 
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/2.dns.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..8ffb3f8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: imxc +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: imxc +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml new file mode 100644 index 0000000..6f67ab4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-1 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-2 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +# On-prem/워커노드 두개/브로커 두개 환경에서 발생할 수 있는 affinity 충돌때문에 주석처리 +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-kafka-cluster-3 +# labels: +# type: local +# app: kafka +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.IMXC_KAFKA_PV_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: kafka-broker +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: + # - {{ .Values.global.IMXC_KAFKA_HOST3 }} diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/5.kafka.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..1982584 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: imxc +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 2 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + annotations: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 6000Mi + limits: + # This limit was intentionally set low as a 
reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 500m + memory: 10000Mi + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "rm -rf /var/lib/kafka/data/*;kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] +# readinessProbe: +# tcpSocket: +# port: 9092 +# timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: kafka-broker + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/6.outside.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..c2d8170 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,89 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + type: ClusterIP +--- +kind: Service 
+apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9094 + port: 32401 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + selector: + app: kafka diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/values.yaml new file mode 100644 index 0000000..cb0e677 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/kafka/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/Chart.yaml new file mode 100644 index 0000000..d602e29 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: postgres +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml new file mode 100644 index 0000000..95c8bda --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + namespace: imxc + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: admin + POSTGRES_PASSWORD: eorbahrhkswp diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml new file mode 100644 index 0000000..dfbd714 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml @@ -0,0 +1,38 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "{{ .Values.global.IMXC_POSTGRES_PV_PATH }}" + nodeAffinity: + required: + 
nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + namespace: imxc + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml new file mode 100644 index 0000000..31e90a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: imxc + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + # nodePort: 5432 + selector: + app: postgres diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml new file mode 100644 index 0000000..14993e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml @@ -0,0 +1,45 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: postgres + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: postgres +{{- end }} + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.global.IMXC_IN_REGISTRY }}/postgres:{{ .Values.global.POSTGRES_VERSION }} + resources: + 
requests: + cpu: 100m + memory: 2000Mi + limits: + cpu: 300m + memory: 2000Mi + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + args: ["-c","max_connections=1000","-c","shared_buffers=512MB","-c","deadlock_timeout=5s","-c","statement_timeout=15s","-c","idle_in_transaction_session_timeout=60s"] + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/values.yaml new file mode 100644 index 0000000..9972ab8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/postgres/values.yaml @@ -0,0 +1,68 @@ +# Default values for postgres. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.lock b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.lock new file mode 100644 index 0000000..21ff14f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.8.0 +digest: sha256:3e342a25057f87853e52d83e1d14e6d8727c15fd85aaae22e7594489cc129f15 +generated: "2021-08-09T15:49:41.56962208Z" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..3b08f9c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.22 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source message broker software that implements the Advanced Message + Queuing Protocol (AMQP) +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.20.5 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/README.md b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/README.md new file mode 
100644 index 0000000..9b26b09 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/README.md @@ -0,0 +1,566 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | ---------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.8.21-debian-10-r13` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the 
deployment | `[]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `[]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. 
Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. 
| `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.servers` | List of LDAP servers hostnames | `[]` | +| `ldap.port` | LDAP servers port | `389` | +| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `{}` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | +| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | +| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain 
scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| ----------------------- | --------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------- | --------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.volumes` | Additional volumes without creating PVC | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.port` | Amqp port | `5672` | +| `service.portName` | Amqp service port name | `amqp` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` | +| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.distPortName` | Erlang distribution service port name | `dist` | +| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` | +| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` | +| `service.metricsPort` | RabbitMQ Prometheues metrics port | `9419` | +| `service.metricsPortName` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` | +| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` | +| `service.epmdPortName` | EPMD Discovery service port name | `epmd` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. 
Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.certManager` | Set this to true in order to add the corresponding annotations for cert-manager | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
| `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource 
should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r172` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. 
+ +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. + +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within in the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. 
An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. 
If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +For a faster resyncronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. 
When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. 
+ - `metrics.port` is renamed to `service.metricsPort`. + - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. 
+ +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..344c403 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.8.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.8.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/README.md b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..054e51f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/README.md @@ -0,0 +1,327 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` 
Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default 
(dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..ae45d5e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for policy. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..f905f20 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. 
Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..60b84a7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. 
+ - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum 
$passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. 
+Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. 
+ +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..1e5bba9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" 
.secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..18d9813 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (not $existingSecretValue) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/default-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..24ffa89 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,167 @@ +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }} + +To access for outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "rabbitmq.validateValues" . -}} + +{{- $requiredPassword := list -}} +{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..6b46b23 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . 
-}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
+*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers", + "ldap.port", and "ldap. user_dn_pattern" are mandatory. 
Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]="lmy-ldap-server" \ + --set ldap.port="389" \ + --set user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Altenatively, user an absolute value for the memory memory high watermark : + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not .Values.ingress.certManager) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Relay on cert-manager to create it by setting `ingress.certManager=true` + - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. 
+ Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/configuration.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..5ba6b72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration}} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/ingress.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..db74e50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pdb.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..bf06b66 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pv.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pv.yaml new file mode 100644 index 0000000..d0f8bdd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pv.yaml @@ -0,0 +1,22 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: rabbitmq-pv + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: {{ .Values.global.RABBITMQ_PATH }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pvc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pvc.yaml new file mode 100644 index 0000000..c677752 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/pvc.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: rabbitmq-pvc + namespace: imxc + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/role.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . 
}}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . 
}}-endpoint-reader +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..4d14e4e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | 
nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..562fde9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +secrets: + - name: {{ include "rabbitmq.fullname" . }} +{{- end }} + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..46b9040 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..45abd14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + 
value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if .Values.loadDefinition.enabled }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "yes" + - name: RABBITMQ_SECURE_PASSWORD + value: "no" + {{- else }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- end }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + - name: stomp + containerPort: 61613 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) 
| nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + postStart: + exec: + command: + - /bin/bash + - -ec + - | + until rabbitmqctl cluster_status >/dev/null; do + echo "Waiting for cluster readiness..." + sleep 5 + done + rabbitmq-queues rebalance "all" + {{- end }} + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + resources: + requests: + memory: "500Mi" + cpu: "150m" + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..4ed26cc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.portName }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..2b4c224 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.epmdNodePort))) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.distNodePort))) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.managerNodePort))) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.metricsNodePort))) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..b6a6078 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,74 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . )}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }}-certs + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "rabbitmq.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.schema.json b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": 
false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..5b74e6c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/rabbitmq/values.yaml @@ -0,0 +1,1151 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +## @section RabitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ 
+## @param image.registry RabbitMQ image registry +## @param image.repository RabbitMQ image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: rabbitmq + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + +## @section Common parameters + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param 
diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "eorbahrhkswp" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "pf6t82zTrqY9iaupUmkPOJxPXjmjiNEd" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify 
Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_stomp" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. 
+## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: false + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## +configuration: |- + {{- if not .Values.loadDefinition.enabled -}} + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = eorbahrhkswp + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. 
+## advancedConfiguration: |- +## [{ +## rabbitmq_auth_backend_ldap, +## [{ +## ssl_options, +## [{ +## verify, verify_none +## }, { +## fail_if_no_peer_cert, +## false +## }] +## ]} +## }]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.servers List of LDAP servers hostnames + ## + servers: [] + ## @param ldap.port LDAP servers port + ## + port: "389" + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter + ## + enabled: false + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] + +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. 
+## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param 
customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + 
create: true + +## @section Persistence parameters + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "rabbitmq" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "rabbitmq-pvc" + + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 5Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + +## @section Exposure parameters + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + # type: NodePort + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## + managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheues metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheues metrics service port name + ## + metricsPortName: metrics + + ## 
@param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: + - name: stomp + port: 61613 + targetPort: 61613 + #nodePort: 31613 + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Ingress annotations + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Relay on cert-manager to create it by setting `ingress.certManager=true` + ## - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## @param ingress.certManager Set this to true in order to add the corresponding annotations for cert-manager + ## to generate a TLS secret for the ingress record + ## + certManager: false + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by 
Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param 
metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Specify Metric Relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + 
additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: 10.10.31.243:5000/cmoa3 # docker.io + repository: bitnami-shell # bitnami/bitnami-shell + tag: 10-debian-10-r175 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## 
Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.lock b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.lock new file mode 100644 index 0000000..ee0ecb7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.3.3 +digest: sha256:264db18c8d0962b5c4340840f62306f45fe8d2c1c8999dd41c0f2d62fc93a220 +generated: "2021-01-15T00:05:10.125742807Z" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.yaml new file mode 100644 index 0000000..6924d59 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.7.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/README.md b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/README.md new file mode 100644 index 0000000..3befa8c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/README.md @@ -0,0 +1,707 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. +While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable 
NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | Additional 
Environment Variables Secret passed to the master's stateful set | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | +| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the slave's stateful set | `[]` | +| `slave.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | +| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` | 
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} | +| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| 
`sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.preExecCmds` | Text to inset into the startup script immediately prior to `sentinel.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change RedisTM version + +To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - RedisTM Master service: Points to the master, where read-write operations can be performed + - RedisTM Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed: + + - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar: + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for RedisTM you need to create a secret containing the password. + +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. +- `tls.certCAFilename`: CA Certificate filename. No defaults. 
+ +For example: + +First, create the secret with the certificates files: + +```console +kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +``` + +Then, use the following parameters: + +```console +tls.enabled="true" +tls.certificatesSecret="certificates-tls-secret" +tls.certFilename="cert.pem" +tls.certKeyFilename="cert.key" +tls.certCAFilename="ca.pem" +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS option to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example: + +You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or provide the following values under `metrics.extraArgs` for TLS client authentication: + +```console +tls-client-key-file +tls-client-cert-file +tls-ca-cert-file +``` + +### Host Kernel Settings + +RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. 
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: + +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis +``` + +## Backup and restore + +### Backup + +To perform a backup you will need to connect to one of the nodes and execute: + +```bash +$ kubectl exec -it my-redis-master-0 bash + +$ redis-cli +127.0.0.1:6379> auth your_current_redis_password +OK +127.0.0.1:6379> save +OK +``` + +Then you will need to get the created dump file from the redis node: + +```bash +$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis +``` + +### Restore + +To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. + +Follow these steps: + +- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step. 
+ +```yaml +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly no + # Disable RDB persistence, AOF persistence already enabled. + save "" +``` + +- Start the new cluster to create the PVCs. + +For example: + +```bash +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +- Now that the PVCs were created, stop it and copy the `dump.rdb` on the persisted data by using a helper pod. + +``` +$ helm delete new-redis + +$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides=' +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "redisvolpod" + }, + "spec": { + "containers": [{ + "command": [ + "tail", + "-f", + "/dev/null" + ], + "image": "bitnami/minideb", + "name": "mycontainer", + "volumeMounts": [{ + "mountPath": "/mnt", + "name": "redisdata" + }] + }], + "restartPolicy": "Never", + "volumes": [{ + "name": "redisdata", + "persistentVolumeClaim": { + "claimName": "redis-data-new-redis-master-0" + } + }] + } +}' --image="bitnami/minideb" + +$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb +$ kubectl delete pod volpod +``` + +- Start the cluster again: + +``` +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +## NetworkPolicy + +To enable network policy for RedisTM, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to RedisTM. 
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 11.0.0 + +When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for this nodes are given through the `slave.xxxx` parameters in `values.yaml` + +### To 9.0.0 + +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the RedisTM exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### To 7.0.0 + +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. 
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..ceb5648 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.3.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.3.3 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/README.md b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/README.md new file mode 100644 index 0000000..461fdc9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/README.md @@ -0,0 +1,316 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. 
If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-------------------------|------------------------------------------------------------|-------------------| +| 
`common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} 
+ topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..d95b569 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} 
+{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..622ef50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4931d94 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..77bcc2b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a786188 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..3e2a47c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..fb2fe60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/default-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/extra-flags-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..7efeda3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + 
registry: 10.10.31.243:5000 # docker.io + repository: redis # bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-sentinel # bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-exporter # bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..a254f58 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + 
+------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.imxc.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . 
}}-slave.imxc.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace imxc {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace imxc /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace imxc /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace imxc /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace imxc -- bash +{{- else }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. 
Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. 
+{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . 
}} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..193105d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/_helpers.tpl @@ -0,0 +1,421 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap-scripts.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..02411c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. 
The liveness check will then timeout waiting for the redis + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_replica() { + if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}" + fi + } + + {{- if .Values.sentinel.staticID }} + # remove generated known sentinels and replicas + tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf + + for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do + NAME="{{ template "redis.fullname" . 
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..923272c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/headless-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..7db7371 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/health-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..0bbbfb6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..928f9a8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} +# {{- if .Values.metrics.serviceMonitor.namespace }} +# namespace: {{ .Values.metrics.serviceMonitor.namespace }} +# {{- else }} + namespace: imxc +# {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - imxc +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..4dae3bc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..ae27ebb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/pdb.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..e2ad471 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: 
{{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..fba6450 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: imxc + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/psp.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..f3c9390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..78aa2e6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,378 @@ +{{- if or (not .Values.cluster.enabled) (not 
.Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..56ba5f1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..5d697de --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,494 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ 
.Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-pv.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-pv.yaml new file mode 100644 index 0000000..adb5416 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-pv.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-master +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: 
redis-data-redis-master-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-0 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-1 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-1 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-role.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..0d14129 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..83c87f5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..9452003 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..be0894b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,384 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.imxc.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml 
.Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..c1f3ae5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..3b3458e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/secret.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..c1103d2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.schema.json b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + 
"matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": 
"metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.yaml new file mode 100644 index 0000000..fcd8710 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/redis/values.yaml @@ -0,0 +1,932 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: latest + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +fullnameOverride: redis + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + #enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + #registry: docker.io + registry: 10.10.31.243:5000 + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.10-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + #enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "dkagh1234!" 
+## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + # type: NodePort + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + 
externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31379 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + #type: NodePort + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31380 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false +# enabled: true + + image: + registry: 10.10.31.243:5000 # registry.cloud.intermax:5000 + repository: redis/redis-exporter + #tag: 1.15.1-debian-10-r2 + tag: latest + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/0.config.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..3b23a9e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=1 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + 
server.2=zookeeper-1.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..422433a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: imxc +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..9fdcf95 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: imxc +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml new file mode 100644 index 0000000..2a909f7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-1 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-2 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-3 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..a9e5cb8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: imxc +spec: + selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 200m + memory: 500Mi + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election +# readinessProbe: +# exec: +# command: +# - /bin/sh +# - -c +# - '[ "imok" = "$(echo ruok | nc -w 1 -q 1 127.0.0.1 2181)" ]' + volumeMounts: + - name: config + mountPath: 
/etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: zookeeper-storage + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml new file mode 100644 index 0000000..e08ed54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml @@ -0,0 +1,50 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-2 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/values.yaml new file mode 100644 index 0000000..7b06985 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for zookeeper. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/index.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/templates/role.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/templates/role.yaml new file mode 100644 index 0000000..28f0e32 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: imxc-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: default + namespace: imxc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/values.yaml new file mode 100644 index 0000000..b7c22ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/02-base/base/values.yaml @@ -0,0 +1,73 @@ +global: + # cluster variables + CLUSTER_ID: cloudmoa + + # default storageClass + DEFAULT_STORAGE_CLASS: exem-local-storage + + # nodeAffinity + affinity_key: cmoa + affinity_value1: worker1 + affinity_value2: worker2 + affinity_value3: worker2 + + # postgres variables + IMXC_POSTGRES_PV_PATH: /media/data/postgres/postgres-data-0 + + #elastic variables + ELASTICSEARCH_PATH1: 
/media/data/elasticsearch/elasticsearch-data-0 + ELASTICSEARCH_PATH2: /media/data/elasticsearch/elasticsearch-data-1 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + # zookeeper variables + IMXC_ZOOKEEPER_PATH1: /media/data/zookeeper/zookeeper-data-0 + IMXC_ZOOKEEPER_PATH2: /media/data/zookeeper/zookeeper-data-1 + IMXC_ZOOKEEPER_PATH3: /media/data/zookeeper/zookeeper-data-2 + + # kafka variables + IMXC_KAFKA_PV_PATH1: /media/data/kafka/kafka-data-0 + IMXC_KAFKA_PV_PATH2: /media/data/kafka/kafka-data-1 + IMXC_KAFKA_PV_PATH3: /media/data/kafka/kafka-data-2 + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # cortex variables + IMXC_INGESTER_PV_PATH1: /media/cloudmoa/ingester/ingester-data-1 + IMXC_INGESTER_PV_PATH2: /media/cloudmoa/ingester/ingester-data-2 + IMXC_INGESTER_PV_PATH3: /media/cloudmoa/ingester/ingester-data-3 + + # redis variables + IMXC_REDIS_PV_PATH1: /media/data/redis/redis-data-0 + IMXC_REDIS_PV_PATH2: /media/data/redis/redis-data-1 + IMXC_REDIS_PV_PATH3: /media/data/redis/redis-data-2 + + # rabbitmq variables + RABBITMQ_PATH: /media/data/rabbitmq + + # custom or etc variables + # IMXC_WORKER_NODE_NAME: $IMXC_WORKER_NODE_NAME # deprecated 2021.10.21 + # IMXC_MASTER_IP: 10.10.30.202 + IMXC_API_SERVER_DNS: imxc-api-service + + METRIC_ANALYZER_MASTER_VERSION: rel3.4.8 + METRIC_ANALYZER_WORKER_VERSION: rel3.4.8 + ELASTICSEARCH_VERSION: v1.0.0 + KAFKA_MANAGER_VERSION: v1.0.0 + KAFKA_INITUTILS_VERSION: v1.0.0 + #KAFKA_VERSION: v1.0.0 + KAFKA_VERSION: v1.0.1 + METRICS_SERVER_VERSION: v1.0.0 + POSTGRES_VERSION: v1.0.0 + CASSANDRA_VERSION: v1.0.0 + RABBITMQ_VERSION: v1.0.0 + CORTEX_VERSION: v1.11.0 #v1.9.0 + #CONSUL_VERSION: 0.7.1 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + rabbitmq: + image: + registry: 10.10.31.243:5000/cmoa3 # {{ .Values.global.IMXC_REGISTRY }} + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/es-ddl-put.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/es-ddl-put.sh new file mode 100755 index 0000000..b3a27ed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/es-ddl-put.sh @@ -0,0 +1,3085 @@ +#!/bin/sh + +#!/bin/bash + +namespace=$1 +export ES_NODEPORT=`kubectl -n ${namespace} get svc elasticsearch -o jsonpath='{.spec.ports[*].nodePort}'` + +export MASTER_IP=`kubectl get node -o wide | grep control-plane | awk '{print $6}'` + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SECURE=true + +if [ $SECURE = true ] +then +PARAM="-u elastic:elastic --insecure" +PROTO="https" +else +PARAM="" +PROTO="http" +fi + +echo Secure=$SECURE +echo Param=$PARAM +echo Proto=$PROTO + +curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices + +echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices" + +# kubernetes_cluster_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_info" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "date": { + "type": "long" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + } +}' + +# kubernetes_cluster_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cluster_history" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_cluster_history": {} + } +}' + +# kubernetes_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_info" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "id": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_info": {} + } +}' + + + +# kubernetes_event_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_event_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_event_info" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_event_info": {} + } +}' + + + + +# kubernetes_job_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_job_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_job_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "commandlist": { + "type": "text", + "index": false + }, + "labellist": { + "type": "text", + "index": false + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_job_info": {} + } +}' + + + +# kubernetes_cronjob_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cronjob_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cronjob_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "jobname": { + "type": "keyword" + }, + "kind": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "lastruntime": { + "type": "long" + }, + "arguments": { + "type": "text", + "index": false + }, + "schedule": { + "type": "keyword" + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_cronjob_info": {} + } +}' + 
+ + + +# kubernetes_network_connectivity +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_network_connectivity-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_network_connectivity" + } + } + }, + "mappings": { + "properties": { + "timestamp": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "container": { + "type": "keyword" + }, + "pid": { + "type": "integer" + }, + "peerNode": { + "type": "keyword" + }, + "peerNamespace": { + "type": "keyword" + }, + "peerService": { + "type": "keyword" + }, + "peerPod": { + "type": "keyword" + }, + "peerContainer": { + "type": "keyword" + }, + "peerPid": { + "type": "integer" + } + } + }, + "aliases": { + "kubernetes_network_connectivity": {} + } +}' + + + +# sparse_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sparse_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": 
'""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "sparse_log" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "date": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "logpath": { + "type": "text", + "index": false + }, + "contents": { + "type": "text" + }, + "lineNumber": { + "type": "integer" + }, + "probability": { + "type": "float" + }, + "subentityId": { + "type": "keyword" + } + } + }, + "aliases": { + "sparse_log": {} + } +}' + + + +# sparse_model +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_model' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_model" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "modifiedDate": { + "type": "long" + }, + "logPath": { + "type": "keyword" + }, + "savedModel": { + "type": "text", + "index": false + } + } + } +}' + + + +# kubernetes_pod_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ +"order": 0, + "index_patterns": [ + "kubernetes_pod_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": 
"1s", + "lifecycle": { + "name": "kubernetes_pod_info" + } + } + }, + "mappings": { + "properties": { + "eventType": {"type": "keyword"}, + "cluster": {"type": "keyword"}, + "namespace": {"type": "keyword"}, + "node": {"type": "keyword"}, + "pod": {"type": "keyword"}, + "podUID": {"type": "keyword"}, + "podCreationTimestamp": {"type": "long"}, + "podDeletionTimestamp": {"type": "long"}, + "podDeletionGracePeriod": {"type": "long"}, + "resourceVersion": {"type": "keyword"}, + "ownerKind": {"type": "keyword"}, + "ownerName": {"type": "keyword"}, + "ownerUID": {"type": "keyword"}, + "podPhase": {"type": "keyword"}, + "podIP": {"type": "keyword"}, + "podStartTime": {"type": "long"}, + "podReady": {"type": "boolean"}, + "podContainersReady": {"type": "boolean"}, + "isInitContainer": {"type": "boolean"}, + "containerName": {"type": "keyword"}, + "containerID": {"type": "keyword"}, + "containerImage": {"type": "keyword"}, + "containerImageShort": {"type": "keyword"}, + "containerReady": {"type": "boolean"}, + "containerRestartCount": {"type": "integer"}, + "containerState": {"type": "keyword"}, + "containerStartTime": {"type": "long"}, + "containerMessage": {"type": "keyword"}, + "containerReason": {"type": "keyword"}, + "containerFinishTime": {"type": "long"}, + "containerExitCode": {"type": "integer"}, + "containerLastState": {"type": "keyword"}, + "containerLastStartTime": {"type": "long"}, + "containerLastMessage": {"type": "keyword"}, + "containerLastReason": {"type": "keyword"}, + "containerLastFinishTime": {"type": "long"}, + "containerLastExitCode": {"type": "integer"} + } + }, + "aliases": { + "kubernetes_pod_info": {} + } +}' + + + +# kubernetes_pod_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_history" + } + } + }, + "mappings": { + "properties": { + "deployName": { + "type": "keyword" + }, + "deployType": { + "type": "keyword" + }, + "deployDate": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "podPhase": { + "type": "keyword" + }, + "startTime": { + "type": "keyword" + }, + "endTime": { + "type": "keyword" + }, + "exitCode": { + "type": "integer" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "time": { + "type": "long" + }, + "containerId": { + "type": "keyword" + }, + "containerName": { + "type": "keyword" + }, + "containerPhase": { + "type": "keyword" + }, + "eventAction": { + "type": "keyword" + }, + "containerStartTime": { + "type": "keyword" + }, + "containerEndTime": { + "type": "keyword" + }, + "containerImage": { + "type": "keyword" + }, + "containerImageShort": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_pod_history": {} + } +}' + + + + +# metric_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/metric_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/metric_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "metric_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + 
"refresh_interval": "1s", + "lifecycle": { + "name": "metric_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "anomaly": { + "type": "boolean" + }, + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "instance": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "metricId": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "score": { + "type": "integer" + }, + "subKey": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "yhatLowerUpper": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "aliases": { + "metric_score": {} + } +}' + + + + +# entity_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/entity_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/entity_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "entity_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "entity_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "nodeId": { + "type": "keyword" + }, + "maxId": { + "type": "keyword" + }, + "maxScore": { + "type": 
"integer" + }, + "entityScore": { + "type": "integer" + } + } + }, + "aliases": { + "entity_score": {} + } +}' + + +# timeline_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/timeline_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/timeline_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "timeline_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "timeline_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "criticalCount": { + "type": "integer" + }, + "warningCount": { + "type": "integer" + }, + "attentionCount": { + "type": "integer" + }, + "normalCount": { + "type": "integer" + }, + "unixtime": { + "type": "long" + } + } + }, + "aliases": { + "timeline_score": {} + } +}' + + + +# spaninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/spaninfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/spaninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "spaninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "spaninfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": 
"keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "spanId": { + "type": "keyword" + }, + "parentSpanId": { + "type": "keyword" + }, + "protocolType": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "operation": { + "type": "keyword" + }, + "spanKind": { + "type": "keyword" + }, + "component": { + "type": "keyword" + }, + "error": { + "type": "boolean" + }, + "peerAddress": { + "type": "keyword" + }, + "peerHostname": { + "type": "keyword" + }, + "peerIpv4": { + "type": "keyword" + }, + "peerIpv6": { + "type": "keyword" + }, + "peerPort": { + "type": "integer" + }, + "peerService": { + "type": "keyword" + }, + "samplingPriority": { + "type": "keyword" + }, + "httpStatusCode": { + "type": "integer" + }, + "httpUrl": { + "type": "keyword" + }, + "httpMethod": { + "type": "keyword" + }, + "httpApi": { + "type": "keyword" + }, + "dbInstance": { + "type": "keyword" + }, + "dbStatement": { + "type": "keyword" + }, + "dbType": { + "type": "keyword" + }, + "dbUser": { + "type": "keyword" + }, + "messagebusDestination": { + "type": "keyword" + }, + "logs": { + "dynamic": false, + "type": "nested", + "properties": { + "fields": { + "dynamic": false, + "type": "nested", + "properties": { + "value": { + "ignore_above": 256, + "type": "keyword" + }, + "key": { + "type": "keyword" + } + } + }, + "timestamp": { + "type": "long" + } + } + } + } + }, + "aliases": { + "spaninfo": {} + } +}' + + + +# sta_podinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_podinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_podinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "version": { + "type": "keyword" + }, + "components": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "aliases": { + "sta_podinfo": {} + } +}' + + +# sta_httpapi +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpapi-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpapi" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "api": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_httpapi": {} + } +}' + + + +# sta_httpsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpsummary" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "pod": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "api": { + "type": "keyword" + }, + "countTotal": { + "type": "integer" + }, + "errorCountTotal": { + "type": "integer" + }, + "timeTotalMicrosec": { + "type": "integer" + }, + "methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_httpsummary": {} + } +}' + + + +# sta_relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_relation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_relation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "parent": { + "type": "keyword" + }, + "children": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + } 
+ } + }, + "aliases": { + "sta_relation": {} + } +}' + + + +# sta_externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_externalrelation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "externalNamespace": { + "type": "keyword" + }, + "externalService": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_externalrelation": {} + } +}' + + + +# sta_traceinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_traceinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_traceinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "operationName": { + "type": "keyword" + }, + "spanSize": { + "type": "integer" + }, + 
"relatedServices": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "error": { + "type": "boolean" + } + } + }, + "aliases": { + "sta_traceinfo": {} + } +}' + + + +# sta_tracetrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_tracetrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_tracetrend" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": {"type": "integer"} + } + }, + { + "errors": { + "match": "error*", + "mapping": {"type": "integer"} + } + } + ] + }, + "aliases": { + "sta_tracetrend": {} + } +}' + +# script_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/script_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + + + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/script_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "script_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "script_history" + } + } + }, + "mappings": { + "properties": { + "taskId": { + "type": "long" + }, + "scriptName": { + "type": "keyword" + }, + "agentName": { + "type": "keyword" + }, + "targetFile": { + "type": "keyword" + }, + "args": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "validCmd": { + "type": "keyword" + }, + "validVal": { + "type": "keyword" + }, + "valid": { + "type": "boolean" + }, + "validResult": { + "type": "keyword" + }, + "cronExp": { + "type": "keyword" + }, + "createUser": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "error": { + "type": "boolean" + }, + "result": { + "type": "keyword" + }, + "order": { + "type": "keyword" + }, + "mtime": { + "type": "keyword" + } + } + }, + "aliases": { + "script_history": {} + } +}' + + +# kubernetes_audit_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_audit_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_audit_log" + }, + "sort.field": "stageTimestamp", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "verb": { + "type": "keyword" + }, + "userName": { + "type": "keyword" + }, + "sourceIps": { + "type": "keyword" + }, + "resource": { + "type": "keyword" + }, + "code": { + "type": "keyword" + }, + "requestReceivedTimestamp": { + "type": "long" + }, + "stageTimestamp": 
{ + "type": "long" + }, + "durationTimestamp": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_audit_log": {} + } +}' + +# license_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/license_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/license_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "license_history-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "license_history" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "license_history": {} + } +}' + +# alert_event_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/alert_event_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/alert_event_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "alert_event_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "alert_event_history" + } + } + }, + "mappings": { + "properties": { + "alertName": { + "type": "keyword" + }, + "clusterId": { + "type": "keyword" + }, + "data": { + "type": "text", + "index": false + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "level": { + "type": "keyword" + }, + "metaId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "startsAt": { + "type": "long" + }, + "threshold": { + "type": "double" + }, + "value": { + "type": "double" + }, + "message": { + "type": "keyword" + }, + "endsAt": { + "type": "long" + }, + "status": { + "type": "keyword" + }, + "hookCollectAt": { + "type": "long" + } + } + }, + "aliases": { + "alert_event_history": {} + } +}' + +# JSPD ilm +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/jspd_ilm' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +# jspd_lite-activetxn +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-activetxn' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-activetxn-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + 
"type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "cpu_time": { + "type": "integer" + }, + "memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_exec_count": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "active_sql_elapse_time": { + "type": "integer" + }, + "db_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "thread_id": { + "type": "long" + }, + "state": { + "type": "short" + }, + "method_id": { + "type": "integer" + }, + "method_seq": { + "type": "integer" + }, + "stack_crc": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-activetxn": {} + } +}' + +# jspd_lite-alert +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-alert' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-alert-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "status": { + "type": "short" + }, + "value": { + "type": "integer" + }, + "pid": { + "type": "integer" + } 
+ } + }, + "aliases": { + "jspd_lite-alert": {} + } +}' + +# jspd_lite-e2einfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-e2einfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-e2einfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "root_tid": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "e2e_info_type": { + "type": "short" + }, + "e2e_key": { + "type": "keyword" + }, + "elapse_time": { + "type": "integer" + }, + "dest_url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-e2einfo": {} + } +}' + +# jspd_lite-methodname +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-methodname' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-methodname-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "method_id": { + "type": "integer" + }, + "class_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + 
"method_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-methodname": {} + } +}' + +# jspd_lite-sqldbinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-sqldbinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-sqldbinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-sqldbinfo": {} + } +}' + +# jspd_lite-txninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "end_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "keyword" + }, + "client_ip": { + "type": "keyword" + }, + "exception": { + 
"type": "short" + }, + "thread_cpu_time": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "open_conn": { + "type": "integer" + }, + "close_conn": { + "type": "integer" + }, + "open_stmt": { + "type": "integer" + }, + "close_stmt": { + "type": "integer" + }, + "open_rs": { + "type": "integer" + }, + "close_rs": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_execute_count": { + "type": "integer" + }, + "sql_elapse_time": { + "type": "integer" + }, + "sql_elapse_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + }, + "txn_flag": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + }, + "http_status": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "aliases": { + "jspd_lite-txninfo": {} + } +}' + +# jspd_lite-txnmethod +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnmethod' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnmethod-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "method_seq": { + "type": "integer" + }, + "method_id": { + "type": "integer" + }, + "calling_method_id": { + "type": "integer" + }, + "stack_crc32": { + "type": "integer" + }, + "calling_stack_crc32": { + "type": "integer" + }, + "elapse_time": { + "type": 
"integer" + }, + "exec_count": { + "type": "integer" + }, + "error_count": { + "type": "integer" + }, + "cpu_time": { + "type": "integer" + }, + "memory": { + "type": "integer" + }, + "start_time": { + "type": "long" + }, + "method_depth": { + "type": "integer" + }, + "exception": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 32768, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-txnmethod": {} + } +}' + +# jspd_lite-txnsql +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnsql' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnsql-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "cursor_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "method_id": { + "type": "integer" + }, + "execute_count": { + "type": "integer" + }, + "elapsed_time": { + "type": "integer" + }, + "elapsed_time_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "fetch_time_max": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-txnsql": {} + } +}' + +# jspd_lite-wasstat +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-wasstat' -H 'Content-Type: application/json' -d '{ + 
"order": 0, + "index_patterns": [ + "jspd_lite-wasstat-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "active_txns": { + "type": "integer" + }, + "sql_exec_count": { + "type": "long" + }, + "sql_prepare_count": { + "type": "long" + }, + "sql_fetch_count": { + "type": "long" + }, + "txn_end_count": { + "type": "long" + }, + "open_file_count": { + "type": "integer" + }, + "close_file_count": { + "type": "integer" + }, + "open_socket_count": { + "type": "integer" + }, + "close_socket_count": { + "type": "integer" + }, + "txn_elapse": { + "type": "long" + }, + "sql_elapse": { + "type": "long" + }, + "txn_elapse_max": { + "type": "long" + }, + "sql_elapse_max": { + "type": "long" + }, + "txn_error_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-wasstat": {} + } +}' + +# jspd_tta-externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "external_namespace": { + "type": "keyword" + }, + "external_service": { + "type": "keyword" + } + } + }, + "aliases": { + 
"jspd_tta-externalrelation": {} + } +}' + +# jspd_tta-relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "from_service": { + "type": "keyword" + }, + "to_service": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_tta-relation": {} + } +}' + +# jspd_tta-txnlist +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnlist' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnlist-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-txnlist": {} + } +}' + +# jspd_tta-txnsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": 
{ + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + }, + "req_count": { + "type": "integer" + }, + "resp_count": { + "type": "integer" + }, + "total_duration": { + "type": "long" + }, + "failed": { + "type": "integer" + }, + "http_methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "http_statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "jspd_tta-txnsummary": {} + } +}' + +# jspd_tta-txntrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txntrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txntrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": { + "type": "integer" + } + } + }, + { + "errors": { + "match": "error*", + "mapping": { + "type": "integer" + } + } + } + ] + }, + "aliases": { + "jspd_tta-txntrend": {} + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "5d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "maximum_metrics" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "maximum_metrics" + }, + "sort.field": "date", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "kind": { + "type": "keyword" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entity": { + "type": "keyword" + }, + "maximum": { + "type": "float" + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } +}' diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": 
'""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + 
+SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh @@ -0,0 
+1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: 
application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | 
awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + 
source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + 
+SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + 
"actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" 
+ }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort 
| awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스 간에 데이터 복제가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernetes_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸의 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernetes_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸의 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와 같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복제 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jaeger_menumeta.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jspd_menumeta.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | 
grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo 
$dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + 
} +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' 
+DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql new file mode 100644 index 0000000..7ed34ad --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql @@ -0,0 +1,803 @@ +UPDATE public.metric_meta2 SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)' WHERE id = 'container_memory_usage_by_workload'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP' WHERE id = 7; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: 
Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' WHERE id = 4; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + 
verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + 
apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: 
log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: 
__address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + 
replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + 
value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 6; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: 
[__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: 
labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: 
$COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 3; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql new file mode 100644 index 0000000..6b63e62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql @@ -0,0 +1,919 @@ + +-- from diff + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config +( + id bigint not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + + + +alter table tenant_info + add 
delete_scheduler_date timestamp; + +alter table tenant_info + add tenant_init_clusters varchar(255); + +alter table cloud_user + add dormancy_date timestamp; + +alter table cloud_user + add status varchar(255) default 'use'::character varying not null; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check|Check Script'; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check'; + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +UPDATE public.menu_meta +SET position = 10::integer +WHERE id = 80::bigint; + +UPDATE public.menu_meta +SET position = 99::integer +WHERE id = 90::bigint; + + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: 
${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id 
}}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"', true); + + + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the 
transaction name generation method by TRX_NAME_TYPE (parameter(1), param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop 
values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = 
String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, 
now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); + + +---public.metric_meta2 +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}} + node_memory_SReclaimable_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024'::text WHERE id LIKE 'node#_memory#_used' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100'::text WHERE id LIKE 'host#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = 'sum by(instance, mountpoint, fstype, data_type) ( 
+label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))'::text WHERE id LIKE 'host#_fs#_total#_by#_mountpoint' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100'::text WHERE id LIKE 'cluster#_memory#_usage' ESCAPE '#'; + + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - (node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}} + node_memory_SReclaimable_bytes{xm_entity_type=''Node'', {filter}})) >= 0 or (node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}})) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} * 100'::text WHERE id LIKE 'node#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})'::text WHERE id LIKE 'host#_memory#_used' ESCAPE '#'; + + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES 
+('imxc_jspd_pod_txn_error_rate', 'Service Pod Transaction Error Rate', 'The number of transaction error rate for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.', '2022-02-15 18:08:58.18', '2022-02-15 18:08:58.18'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_rate', 'Service Transaction Error Rate', 'Service Transaction Error Rate', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.', '2022-02-15 14:33:00.118', '2022-02-15 15:40:17.64'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_elapsed_time_avg', 'Service Transaction Elapsed Time (avg)', 'Service Average Elapsed Time', 'sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or 
sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2021-11-15 16:09:34.233', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_elapsed_time_avg', 'Service Pod Transaction Elapsed Time (avg)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Pod Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2022-02-15 18:04:55.228', '2022-02-15 18:04:55.228'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_count', 'Service Transaction Error Count', 'Service Transaction Error Count', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) ', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Error Request count:{{humanize $value}}%|{threshold}%.', '2021-11-15 16:10:31.352', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, 
entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_per_sec', 'Service Transaction Count (per Second)', 'Service Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Svc Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2021-11-15 16:11:19.606', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_per_sec', 'Service Pod Transaction Count (per sec)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-02-15 17:59:39.45', '2022-02-15 17:59:39.45'); + + + +-- Auto-generated SQL script #202202221030 +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_system_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) 
without (instance)) * 0)' + WHERE id='container_cpu_system_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_user_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_user_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_limit_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_reads_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 
0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)' + WHERE id='container_fs_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_writes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_cache_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_max_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_swap_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100)' + WHERE id='container_memory_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
(container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_working_set_bytes_by_workload'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_active_txn_per_sec', 'Service Active Transaction Count (per Second)', 'Service Active Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:51:45.946', '2022-03-11 15:51:45.946') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))' +WHERE id = 'imxc_jspd_active_txn_per_sec'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_pod_active_txn_per_sec', 'Service Pod Active Transaction Count (per sec)', 'The number of active transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:53:29.252', '2022-03-11 15:53:29.252') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))' +WHERE id = 'imxc_jspd_pod_active_txn_per_sec'; + + 
+--public.agent_install_file_info + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + 
- list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 
+ fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + 
tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: 
$DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql new file mode 100644 index 0000000..e84e9be --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql @@ -0,0 +1,459 @@ + UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - 
watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 
65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent + spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: 
cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] 
+ regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: 
xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent + spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config + ' WHERE id = 6; \ No 
newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql new file mode 100644 index 0000000..0d20f2c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql @@ -0,0 +1,1379 @@ +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +-- 더존(3.3.2) 에서 누락되었던 항목 모두 추가 +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES 
('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values 
('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, 
code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search anomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'imxc-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into public.common_setting 
(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api-demo', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui-demo', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream Txntrend', 
'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +UPDATE public.agent_install_file_info +SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - 
cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + 
matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text +WHERE id = 2::bigint; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='topology_idx'; + +UPDATE public.common_setting +SET code_value='spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', + code_group='storageidx' +WHERE code_id='trace_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='event_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE 
code_id='sparse_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='anomaly_idx'; + +UPDATE public.common_setting +SET code_value='alert_event_history', + code_group='storageidx' +WHERE code_id='alert_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='audit_idx'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - 
hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + 
k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp'::text WHERE id = 5::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: 
kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + 
- batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: 
cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions 
is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: 
xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + 
target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +ALTER TABLE public.alert_rule_config_info ALTER COLUMN config_data TYPE text; + +update alert_rule_config_info +set config_data = '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ 
$labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"' +where config_id = 'rules'; + +ALTER TABLE public.alert_config_info ALTER COLUMN config_data TYPE text, ALTER COLUMN config_default TYPE text; + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql new file mode 100644 index 0000000..5c5d3c9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql @@ -0,0 +1,8 @@ +-- admin의 owner 속성 추가 +UPDATE cloud_user SET is_tenant_owner = true WHERE user_id = 'admin'; + +-- owner에 대한 종속성을 admin으로 이관기능(필요하면 사용) +UPDATE auth_resource3 SET name = replace(name, 'owner', 'admin') WHERE name like '%|owner|%'; + +-- CLOUD-2305 node_memory_used metric_meta node_memory_SReclaimable_bytes 제거 패치문 반영 +UPDATE metric_meta2 SET expr = 
'((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024' WHERE id = 'node_memory_used'; diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql new file mode 100644 index 0000000..02f01db --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql @@ -0,0 +1,361 @@ +-- agent_install_file_info +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] 
+ target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when 
total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: 
[__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - 
source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +-- CLOUD-2798 pod_phase_count_by_cluster metric_meta 수정 +UPDATE metric_meta2 SET expr = 'count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))' WHERE id = 'pod_phase_count_by_cluster'; + +-- node_memory_usage 
수정 +update metric_meta2 set expr = 'sum by (xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' where id = 'node_memory_usage'; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql new file mode 100644 index 0000000..7c582c5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql @@ -0,0 +1,360 @@ +-- CLOUD-3473 Memory capacity 조회 쿼리 수정 +update metric_meta2 set description = 'imxc_kubernetes_node_resource_capacity_memory', +expr = 'sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})' where id = 'cluster_memory_capacity'; + +-- module명 metricdata owner_name 와 일치하도록 변경 +update common_setting set code_value ='cmoa-collector' where code_id = 'Cloudmoa Collector'; +update common_setting set code_value ='imxc-api' where code_id = 'Api Server'; +update common_setting set code_value ='imxc-ui' where code_id = 'Ui Server'; +update common_setting set code_value ='cloudmoa-trace-agent' where code_id = 'Trace Agent'; + +-- CLOUD-4795 Containerd 환경 Container Network 수집 불가 건 확인 +-- 22.10.08 현대카드 대응 건으로 release 3.4.6에 반영 +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: 
"Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: 
xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] 
+ target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: 
job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - 
source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: 
xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config'::text WHERE id = 3::bigint; + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql 
b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql new file mode 100644 index 0000000..92344db --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql @@ -0,0 +1,102 @@ +-- CLOUD-4752 node_memory_usage alert 관련 쿼리 수정 +update metric_meta2 set +expr = 'sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' +where id = 'node_memory_usage'; + +-- CLOUD-6474 node-exporter | GOMAXPROCS 세팅 +-- Auto-generated SQL script #202211241543 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - 
--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' + WHERE id=4; \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql new file mode 100644 index 0000000..ea66c68 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql @@ -0,0 +1,387 @@ +-- CLOUD-6526 host 관련 쿼리 수정 +-- 수집된 메트릭 시간차로 인해 데이터 표출이 안되는걸 방지하기 위해 rate 5m 추가 +UPDATE metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )' +WHERE id='host_network_io_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or 
rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )' +WHERE id = 'host_disk_read_write_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (instance) ( +(rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or +(rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))' +WHERE id = 'host_disk_iops'; + +-- CLOUD-8671 Metric-Agent | 데이터 필터링 설정 추가 +-- Workload > Pod 화면 등에 Docker 런타임 환경의 자원 사용량이 2배 가량으로 보이던 문제 픽스 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: 
''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=3; + +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert 
: ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: 
''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: 
[namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: 
cloudmoa-metric-agent-config +' + WHERE id=6; diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql new file mode 100644 index 0000000..99d1dbe --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql @@ -0,0 +1,2844 @@ +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS warning_sign character VARYING(255); +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS critical_sign character VARYING(255); + +CREATE TABLE IF NOT EXISTS public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + 
label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + WHERE public.metric_meta2.id = 'node_contextswitch_and_filedescriptor'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU 
Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_core_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize 
$value}}%|{threshold}%', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_reads_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_limit_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} 
Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_writes_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} + CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on 
(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} + CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) 
group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_max_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} 
CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_bytes_by_workload'; + + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_swap_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in 
GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_working_set_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_cache_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE 
public.metric_meta2.id = 'container_network_receive_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_transmit_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_pod_not_running_by_workload','Number of Pods not running 
By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_pod_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count 
by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_container_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + WHERE public.metric_meta2.id = 'cotainer_restart_count_by_workload'; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: 
scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE 
+ spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) + WHERE public.agent_install_file_info.id = 4; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + 
metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. 
This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + 
replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') + WHERE public.agent_install_file_info.id = 3; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + 
verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) + WHERE public.agent_install_file_info.id = 2; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... 
should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') + WHERE public.agent_install_file_info.id = 6; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. 
+ ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + 
apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + 
app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. + ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + 
containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) + WHERE public.agent_install_file_info.id = 7; + +--Menu Resource +--Infrastructure +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (SELECT id 
FROM auth_resource3 WHERE name='menu|Infrastructure|Topology'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Resource Usage'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Resource Usage'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Namespace'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Namespace'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Nodes'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES 
(6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Node Details'); + +--Workloads +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Deploy List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Cron Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Pods'); + +--Services +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Structure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Detail'); + +--Statistics & Analysis +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Performance Trends'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Anomaly Score'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Job History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse 
Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Log Viewer'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Event Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Container Life Cycle'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Service Traces'); + +--Reports +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Documents'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (62, 'Templates', NULL, 1, 'reportSettings', (select id from auth_resource3 where name='menu|Reports|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Templates'); + +--Dashboards +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Documents'); + +INSERT INTO public.menu_meta (id, 
description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Templates'); + +--Hosts +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (80, 'Hosts', '12.Hosts', 1, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
+VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Detail'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Group'); + +--Settings +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (90, 'Settings', '08.Setting', 10, NULL, (select id from auth_resource3 where name='menu|Settings'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|User & Group'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', 
(select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Host Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Metric Meta'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|General'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Notification'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (99, 
'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alias'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|License'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent Installation'); + +--Health Check +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) +VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check|Check Script'); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql new file mode 100644 index 0000000..60ad862 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql @@ -0,0 +1,4 @@ +alter table cloud_user alter column log_in_count set default 0; +alter table cloud_user alter column user_lock set default false; + +UPDATE public.metric_meta2 SET meta_name = 'Number of Containers Restart', description = 'Number of Containers Restart (10m)', expr = 'increase(imxc_kubernetes_container_restart_count{{filter}}[10m])', resource_type = 'State', entity_type = 'Workload', groupby_keys = null, in_use = true, anomaly_score = false, message = 'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.', created_date = '2021-06-23 09:30:38.646312', modified_date = '2021-06-23 09:30:38.646312' WHERE id = 'cotainer_restart_count_by_workload'; \ No newline at end of file diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_ddl.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_ddl.psql new file mode 100644 index 0000000..c8deff4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_ddl.psql @@ -0,0 +1,1667 @@ +CREATE TABLE public.tenant_info ( + id character varying(255) NOT NULL, + name character varying(255) NOT NULL, + in_used boolean DEFAULT true, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + delete_scheduler_date timestamp without time zone NULL, + contract_id bigint NOT NULL, + tenant_init_clusters character varying(255) NULL +); +ALTER TABLE ONLY public.tenant_info ADD CONSTRAINT tenant_info_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + cluster_id character varying(255) NOT NULL, + description character varying(255), + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + namespace character varying(255) DEFAULT 'default'::character varying +); + +ALTER TABLE public.alert_group OWNER TO admin; + +ALTER TABLE ONLY public.alert_group + ADD CONSTRAINT alert_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX alert_group_name_uindex ON public.alert_group USING btree (name); + +CREATE TABLE public.alert_target ( + id bigint NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + cluster_id character varying(255) NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + alert_group_id bigint, + namespace character varying(255) +); + +ALTER TABLE public.alert_target OWNER TO admin; + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT alert_target_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.alert_target + ADD CONSTRAINT fkjrvj775641ky7s0f82kx3sile FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + + + +CREATE TABLE public.report_template ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + enable boolean NOT NULL, + metric_data text, + template_data text, + title character varying(255) +); + +ALTER TABLE public.report_template OWNER TO admin; + +ALTER TABLE ONLY public.report_template + ADD CONSTRAINT report_template_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_event ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + alert_name character varying(255) NOT NULL, + cluster_id character varying(255) NOT NULL, + data text NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + level character varying(255) NOT NULL, + meta_id character varying(255) NOT NULL, + namespace character varying(255), + starts_at bigint NOT NULL, + threshold character varying(255) NOT NULL, + value character varying(255) NOT NULL, + message character varying(255), + ends_at bigint, + status character varying(20) NOT NULL, + hook_collect_at bigint +); + +ALTER TABLE public.alert_event OWNER TO admin; + +CREATE TABLE public.metric_meta2 ( + id character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + description character varying(255) NOT NULL, + expr text NOT NULL, + resource_type character varying(255), + entity_type character varying(255) NOT NULL, + groupby_keys character varying(255), + in_use boolean DEFAULT false NOT NULL, + anomaly_score boolean DEFAULT false NOT NULL, + message character varying(255) NOT NULL, + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without 
time zone DEFAULT now() NOT NULL +); + +ALTER TABLE public.metric_meta2 OWNER to admin; + +ALTER TABLE ONLY public.metric_meta2 + ADD CONSTRAINT metric_meta2_pk PRIMARY KEY (id); + +CREATE TABLE public.alert_rule ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + critical float, + name character varying(255), + warning float, + alert_group_id bigint, + alert_rule_meta_id character varying(255) NOT NULL, + alert_target_id bigint, + duration character varying(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + warning_sign character varying(255), + critical_sign character varying(255) +); + +ALTER TABLE public.alert_rule OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT alert_rule_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk6b09d1xfyago6wiiqhdiv03s3 FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk8wkucwkgr48hkfg8cvuptww0f FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fkiqaskea7ts0f872u3nx9ne25u FOREIGN KEY (alert_target_id) REFERENCES public.alert_target(id); + +CREATE TABLE public.alert_rule_meta ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + description text NOT NULL, + expr character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + target character varying(255) NOT NULL, + message character varying(255) +); + +ALTER TABLE public.alert_rule_meta OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule_meta + ADD CONSTRAINT alert_rule_meta_pkey PRIMARY KEY (id); + +CREATE SEQUENCE hibernate_sequence; + +CREATE TABLE public.cloud_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + name 
character varying(255) NOT NULL, + description character varying(255), + created_by character varying(255), + auth_resource_id bigint +); + +ALTER TABLE public.cloud_group OWNER TO admin; + +ALTER TABLE ONLY public.cloud_group + ADD CONSTRAINT cloud_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX cloud_group_name_uindex ON public.cloud_group USING btree (name); + +CREATE TABLE public.cloud_user ( + user_id character varying(255) NOT NULL, + email character varying(255), + is_admin boolean NOT NULL, + phone character varying(255), + user_nm character varying(255) NOT NULL, + user_pw character varying(255) NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + dormancy_date timestamp without time zone NULL, + company character varying(255), + department character varying(255), + last_log_in_date timestamp without time zone, + "position" character varying(255), + use_ldap boolean NOT NULL, + auth_method character varying(255) NOT NULL, + log_in_count integer default 0 NOT NULL, + user_lock boolean default false NOT NULL, + user_lock_date timestamp without time zone, + tenant_id character varying(120), + is_tenant_owner boolean default false, + auth_resource_id bigint, + status character varying(255) default 'use' NOT NULL +); + +ALTER TABLE public.cloud_user OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user ADD CONSTRAINT cloud_user_pkey PRIMARY KEY (user_id); + +ALTER TABLE ONLY public.cloud_user + ADD CONSTRAINT cloud_user_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.menu_meta ( + id bigint NOT NULL, + description character varying(255), + icon character varying(255), + "position" integer NOT NULL, + url character varying(255), + auth_resource3_id bigint NOT NULL, + scope_level int default 0 +); + +ALTER TABLE public.menu_meta OWNER TO admin; + +ALTER TABLE ONLY public.menu_meta + ADD CONSTRAINT menu_meta_pkey PRIMARY KEY (id); + + + +CREATE 
TABLE public.metric_base ( + meta_name character varying(255) NOT NULL, + provider character varying(255) NOT NULL, + description character varying(255) NOT NULL, + resource_type character varying(255), + diag_type character varying(255), + entity_type character varying(255) NOT NULL, + metric_type character varying(255) NOT NULL, + keys character varying(255), + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.metric_base OWNER TO admin; + +ALTER TABLE ONLY public.metric_base + ADD CONSTRAINT metric_base_pk PRIMARY KEY (meta_name); + +CREATE TABLE public.report_static ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + metric_data text, + template_data text, + title character varying(255), + type character varying(255), + report_template_id bigint +); + +ALTER TABLE public.report_static OWNER TO admin; + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT report_static_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT fk7o821ym9a57lrcfipf928cfpe FOREIGN KEY (report_template_id) REFERENCES public.report_template(id); + +CREATE TABLE public.user_group ( + user_group_id bigint NOT NULL, + user_id character varying(255) NOT NULL +); + +ALTER TABLE public.user_group OWNER TO admin; + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT user_group_pkey PRIMARY KEY (user_group_id, user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkooy6rip2craw6jy3geb5wnix6 FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkowo8h9te5nwashab3u30docg FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +CREATE TABLE public.cloud_user_profile ( + user_id character 
varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + profile_image oid +); + +ALTER TABLE public.cloud_user_profile OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_profile + ADD CONSTRAINT cloud_user_profile_pkey PRIMARY KEY (user_id); + + +CREATE TABLE public.common_setting ( + code_id character varying(255) NOT NULL, + code_value character varying(255), + code_desc character varying(255), + code_auth character varying(255), + code_group character varying(255), + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.common_setting OWNER TO admin; + +ALTER TABLE ONLY public.common_setting + ADD CONSTRAINT common_setting_pkey PRIMARY KEY (code_id); + + + +CREATE TABLE public.dashboard_thumbnail ( + id bigint NOT NULL, + thumbnail_image oid, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.dashboard_thumbnail OWNER TO admin; + +ALTER TABLE ONLY public.dashboard_thumbnail + ADD CONSTRAINT dashboard_thumbnail_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.notification_channel ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone, + modified_by character varying(255), + modified_date timestamp without time zone, + cluster_id character varying(255), + config text, + name character varying(255), + type character varying(255) +); + +ALTER TABLE public.notification_channel OWNER TO admin; + +ALTER TABLE ONLY public.notification_channel + ADD CONSTRAINT notification_channel_pkey PRIMARY KEY (id); + + +CREATE TABLE public.notification_registry ( + id bigint NOT NULL, + alert_rule_id bigint NOT NULL, + notification_channel_id bigint +); + +ALTER TABLE public.notification_registry OWNER TO admin; + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT notification_registry_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.notification_registry + ADD CONSTRAINT fk28xo8snm6fd19i3uap0oba0d1 FOREIGN KEY (notification_channel_id) REFERENCES public.notification_channel(id); + + +CREATE TABLE public.license_check_2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_id integer NOT NULL, + real_host_id integer NOT NULL, + imxc_cpu_count integer NOT NULL, + real_cpu_count integer NOT NULL, + target_clusters_count integer NOT NULL, + real_clusters_count integer NOT NULL, + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + features_bitmap integer NOT NULL, + allowable_range integer NOT NULL, + check_time timestamp without time zone NOT NULL, + check_result integer NOT NULL +); + +ALTER TABLE public.license_check_2 + ADD CONSTRAINT license_check_pkey PRIMARY KEY (id); + +CREATE INDEX license_check_check_time_idx ON license_check_2(check_time); + + +CREATE TABLE public.license_violation ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone +); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check_2(id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check_2(id); + +CREATE INDEX license_violation_check_time_idx ON license_violation(check_time); +CREATE INDEX 
license_violation_resolved_time_idx ON license_violation(resolved_time); + + +CREATE TABLE public.license_key ( + id bigint NOT NULL, + license_key text NOT NULL, + set_time timestamp NOT NULL, + in_used bool NULL, + tenant_id varchar NULL, + cluster_id bigint NULL, + CONSTRAINT license_key_pkey PRIMARY KEY (id) +); + +ALTER TABLE public.license_key ADD CONSTRAINT license_key_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.license_check2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_ids character varying(255), + real_host_ids character varying(255), + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + allowable_range integer NOT NULL, + license_cluster_id character varying(255), + check_time timestamp without time zone NOT NULL, + check_result integer NOT null +); + +ALTER TABLE public.license_check2 + ADD CONSTRAINT license_check2_pkey PRIMARY KEY (id); + +CREATE INDEX license_check2_time_idx ON license_check2(check_time); + +CREATE TABLE public.license_violation2 ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone, + cluster_id varchar not null +); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check2(id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT 
license_violation2_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check2(id); + +CREATE INDEX license_violation2_check_time_idx ON license_violation2(check_time); +CREATE INDEX license_violation2_resolved_time_idx ON license_violation2(resolved_time); + +CREATE TABLE public.license_key2 ( + id bigint not null, + license_key text not null, + set_time timestamp without time zone not null, + cluster_id varchar, + license_used bool not null +); + +ALTER TABLE public.license_key2 + ADD CONSTRAINT license_key2_pkey PRIMARY KEY (id); + +create table public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +CREATE TABLE public.auth_resource2 ( + id bigint NOT NULL default nextval('hibernate_sequence'), + access_type integer NOT NULL, + name character varying(255) NOT NULL, + parent_id bigint, + type character varying(255) NOT NULL +); + +ALTER TABLE public.auth_resource2 OWNER TO admin; + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT auth_resource2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT resource_name_uniq UNIQUE (name, type, parent_id); + +--ALTER TABLE ONLY public.auth_resource2 +-- ADD 
CONSTRAINT auth_resource2_auth_resource_id_fk FOREIGN KEY (parent_id) REFERENCES public.auth_resource2(id); +-- +--ALTER TABLE ONLY public.menu_meta +-- ADD CONSTRAINT fk2tqq4ybf6w130fsaejhrsnw5s FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.user_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_id character varying(255) +); + +ALTER TABLE public.user_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.user_permission2 +-- ADD CONSTRAINT user_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_user_id_fk FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + + +CREATE TABLE public.group_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_group_id bigint +); + +ALTER TABLE public.group_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_user_group_id_fk FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +-- ALTER TABLE ONLY public.group_permission2 +-- ADD CONSTRAINT group_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.resource_group2 ( + id int8 NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + "name" varchar(255) NOT NULL, + description 
varchar(255) NULL, + CONSTRAINT resource_group2_pkey PRIMARY KEY (id) +-- CONSTRAINT resource_group2_fk1 FOREIGN KEY (id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_group2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_group2 TO "admin"; + +CREATE TABLE public.resource_member2 ( + resource_group_id int8 NOT NULL, + auth_resource_id int8 NOT NULL, + CONSTRAINT resource_member2_pkey PRIMARY KEY (resource_group_id, auth_resource_id), + CONSTRAINT resource_member2_fkey1 FOREIGN KEY (resource_group_id) REFERENCES resource_group2(id) +-- CONSTRAINT resource_member2_fkey2 FOREIGN KEY (auth_resource_id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_member2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_member2 TO "admin"; + +CREATE TABLE public.dashboard2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + layout text NOT NULL, + title character varying(255) NOT NULL, + auth_resource_id bigint NOT NULL, + created_by character varying(255) NOT NULL, + modified_by character varying(255) NOT NULL, + description character varying(255), + share boolean DEFAULT false +); + +ALTER TABLE public.dashboard2 OWNER TO admin; + +ALTER TABLE ONLY public.dashboard2 + ADD CONSTRAINT dashboard2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.dashboard2 +-- ADD CONSTRAINT dashboard_resource_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.log_management ( + cluster_id varchar NOT NULL, + node_id varchar NOT NULL, + log_rotate_dir varchar, + log_rotate_count integer, + log_rotate_size integer, + log_rotate_management boolean NOT NULL, + back_up_dir varchar, + back_up_period integer, + back_up_dir_size integer, + back_up_management boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +alter table public.log_management add constraint 
log_management_pkey primary key (cluster_id, node_id); + +CREATE TABLE public.sampling_setting ( + service_id bigint NOT NULL, + service_name character varying(255), + sampling_type character varying(255), + sampling_param character varying(255), + cluster varchar, + namespace varchar, + cluster_id bigint +); +ALTER TABLE public.sampling_setting OWNER TO admin; + +ALTER TABLE ONLY public.sampling_setting + ADD CONSTRAINT sampling_setting_pkey PRIMARY KEY (service_id); + +CREATE TABLE public.operation_setting ( + id bigint NOT NULL, + service_id bigint NOT NULL, + sampling_type character varying(255), + sampling_param character varying(255), + operation_name character varying(255) +); + +ALTER TABLE public.operation_setting OWNER TO admin; + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_fkey FOREIGN KEY (service_id) REFERENCES public.sampling_setting(service_id); + +CREATE TABLE public.cluster_setting ( + cluster_id bigint NOT NULL, + param_type character varying(255), + param_value character varying(255), + cluster_name varchar, + name character varying(255) +); + +ALTER TABLE ONLY public.cluster_setting + ADD CONSTRAINT cluster_setting_pkey PRIMARY KEY (cluster_id); + +CREATE TABLE public.alias_code ( + user_id varchar NOT NULL, + id varchar NOT NULL, + name varchar, + type varchar, + use_yn varchar, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.alias_code add constraint alias_code_pkey primary key (user_id, id); + +CREATE TABLE public.sparse_log_info ( + id varchar NOT NULL, + cluster_id varchar, + namespace varchar, + target_type varchar, + target_id varchar, + log_path varchar, + created_date timestamp, + modified_date timestamp, + threshold float4, + PRIMARY KEY ("id") +); + +CREATE TABLE public.view_code ( + user_id varchar NOT NULL, + view_id 
varchar NOT NULL, + json_data text, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.view_code add constraint view_code_pkey primary key (user_id, view_id); + +CREATE TABLE public.entity_black_list ( + entity_type varchar not null, + entity_name varchar not null, + cluster_id varchar not null, + namespace varchar, + black_list bool not null, + workload varchar(255) not null +); + +ALTER TABLE public.entity_black_list + ADD CONSTRAINT entity_black_list_pkey PRIMARY KEY (entity_type, entity_name, cluster_id, namespace); + +CREATE TABLE public.script_setting ( + id bigint NOT NULL, + name character varying(255), + agent_list character varying(255), + file_path character varying(255), + args character varying(255), + valid_cmd character varying(255), + valid_val character varying(255), + cron_exp character varying(255), + create_user character varying(255), + mtime BIGINT, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.script_setting + ADD CONSTRAINT script_setting_pkey PRIMARY KEY (id); + +CREATE TABLE public.agent_install_file_info ( + id bigint NOT NULL, + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + description text, + version character varying(255), + yaml text, + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.agent_install_file_info ADD CONSTRAINT agent_install_file_info_pkey PRIMARY KEY (id); + +create table auth_resource3( + id bigint NOT NULL default nextval('hibernate_sequence'), + name character varying(255) NOT NULL, + is_deleted boolean not null, + tenant_id character varying(255) +); + +ALTER TABLE public.auth_resource3 owner to admin; + +ALTER TABLE ONLY public.auth_resource3 + ADD CONSTRAINT auth_resource3_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.auth_resource3 + ADD CONSTRAINT auth_resource3_name_uniq UNIQUE (name); + +create table resource_member3( + resource_group_id bigint not null, + auth_resource3_id bigint not null +); + +ALTER TABLE resource_member3 owner to admin; + +ALTER TABLE ONLY public.resource_member3 + ADD CONSTRAINT resource_member3_pkey primary key (resource_group_id, auth_resource3_id); + +ALTER TABLE ONLY public.auth_resource3 ADD CONSTRAINT auth_resource3_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +ALTER TABLE public.menu_meta ADD CONSTRAINT menu_meta_auth_resource3_fk FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.user_permission2 ADD CONSTRAINT user_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_group2 ADD CONSTRAINT resource_group2_auth_resource3_fk1 FOREIGN KEY (id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey1 FOREIGN KEY (resource_group_id) REFERENCES public.resource_group2(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey2 FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.group_permission2 ADD CONSTRAINT group_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.dashboard2 ADD CONSTRAINT dashboard2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_user ADD CONSTRAINT cloud_user_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_group ADD CONSTRAINT cloud_group_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character 
varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +-- noti server table +CREATE TABLE public.alert_group_v2 ( + id bigint NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + cluster_id varchar(255) NOT NULL, + description varchar(255), + name varchar(255) NOT NULL, + type varchar(255) NOT NULL, + namespace varchar(255) default 'default'::character varying, + destination varchar(255) NOT NULL, + created_by varchar(255) NOT NULL +); + +CREATE TABLE public.alert_target_v2 ( + id bigint NOT NULL, + created_date timestamp, + modified_date timestamp, + cluster_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + alert_group_id bigint, + namespace varchar(255) +); + +CREATE TABLE public.alert_rule_v2 ( + id bigint NOT NULL, + created_date timestamp 
NOT NULL, + modified_date timestamp NOT NULL, + critical double precision, + name varchar(255), + warning double precision, + alert_group_id bigint, + alert_rule_meta_id varchar(255) NOT NULL, + alert_target_id bigint, + duration varchar(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + critical_sign varchar(255), + warning_sign varchar(255), + destination varchar(255), + created_by varchar(255) +); + +ALTER TABLE public.alert_group_v2 ADD CONSTRAINT alert_group_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_id_pk PRIMARY KEY (id); + +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_rule_meta_id_fk FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_target_id_fk FOREIGN KEY (alert_target_id) REFERENCES public.alert_target_v2(id); +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT fk4lljw4fnija73tm3lthjg90rx FOREIGN KEY (alert_rule_id) REFERENCES public.alert_rule_v2(id); + + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config +( + id varchar not null, + cluster_id varchar, + resolve_timeout varchar, + 
receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +-------- 2022-05-31 KubeInfo flatting table -------- +CREATE TABLE cmoa_configmap_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + kind_status varchar(50), + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + binaryData text, + data text, + immutable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +----------------------- +CREATE TABLE 
cmoa_cronjob_active( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_active_apiVersion text, + status_active_fieldPath text, + status_active_kind text, + status_active_name text, + status_active_namespace text, + status_active_resourceVersion text, + status_active_uid text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_cronjob_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_failedJobsHistoryLimit text, + spec_schedule text, + spec_successfulJobsHistoryLimit text, + spec_suspend text, + status_lastScheduleTime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_daemonset_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + status_currentNumberScheduled text, + status_desiredNumberScheduled text, + status_numberAvailable text, + status_numberMisscheduled text, + status_numberReady text, + status_numberUnavailable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_deployment_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid 
varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_replicas text, + spec_template_spec_containers_image text, + status_availableReplicas text, + status_readyReplicas text, + status_replicas text, + status_unavailableReplicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_addresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_addresses_ip text, + subset_addresses_hostname text, + subset_addresses_nodeName text, + subset_addresses_targetRef text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_notreadyaddresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_notreadyaddresses_ip text, + subset_notreadyaddresses_hostname text, + subset_notreadyaddresses_nodename text, + subset_notreadyaddresses_targetref text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE 
cmoa_endpoint_ports( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_ports_port text, + subset_ports_appprotocol text, + subset_ports_name text, + subset_ports_protocol text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_event_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + action text, + count text, + eventtime text, + firsttimestamp text, + involvedobject_apiversion text, + involvedobject_fieldpath text, + involvedobject_kind text, + involvedobject_name text, + involvedobject_namespace text, + involvedobject_resourceversion text, + involvedobject_uid text, + lasttimestamp text, + message text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + reason text, + related_apiversion text, + related_fieldpath text, + related_kind text, + related_name text, + related_namespace text, + related_resourceversion text, + related_uid text, + series_count text, + series_lastobservedtime text, + series_state text, + source_component text, + source_host text, + type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + 
metadata_resourceversion text, + spec_backofflimit text, + spec_completions text, + spec_parallelism text, + status_active text, + status_completiontime text, + status_failed text, + status_starttime text, + status_succeeded text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_template ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_template_spec_containers_args text, + spec_template_spec_containers_command text, + spec_template_spec_containers_image text, + spec_template_spec_containers_name text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_namespace_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + spec_finalizers text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_annotations text, + spec_podcidr text, + spec_taints text, + status_capacity_cpu text, + status_capacity_ephemeral_storage text, + status_capacity_hugepages_1gi text, + status_capacity_hugepages_2mi text, + status_capacity_memory text, + status_capacity_pods 
text, + status_allocatable_cpu text, + status_allocatable_ephemeral_storage text, + status_allocatable_hugepages_1gi text, + status_allocatable_hugepages_2mi text, + status_allocatable_memory text, + status_allocatable_pods text, + status_addresses text, + status_daemonendpoints_kubeletendpoint_port text, + status_nodeinfo_machineid text, + status_nodeinfo_systemuuid text, + status_nodeinfo_bootid text, + status_nodeinfo_kernelversion text, + status_nodeinfo_osimage text, + status_nodeinfo_containerruntimeversion text, + status_nodeinfo_kubeletversion text, + status_nodeinfo_kubeproxyversion text, + status_nodeinfo_operatingsystem text, + status_nodeinfo_architecture text, + status_volumesinuse text, + status_volumesattached text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_condition ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lastheartbeattime text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_image ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_images_names text, + status_images_sizebytes text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolume_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp 
varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_awselasticblockstore text, + spec_azuredisk text, + spec_azurefile text, + spec_capacity text, + spec_claimref_apiversion text, + spec_claimref_fieldpath text, + spec_claimref_kind text, + spec_claimref_name text, + spec_claimref_namespace text, + spec_claimref_resourceversion text, + spec_claimref_uid text, + spec_csi text, + spec_fc text, + spec_flexvolume text, + spec_flocker text, + spec_gcepersistentdisk text, + spec_glusterfs text, + spec_hostpath text, + spec_iscsi text, + spec_local text, + spec_nfs text, + spec_persistentvolumereclaimpolicy text, + spec_photonpersistentdisk text, + spec_portworxvolume text, + spec_quobyte text, + spec_rbd text, + spec_scaleio text, + spec_storageclassname text, + spec_storageos text, + spec_volumemode text, + spec_vspherevolume text, + status_message text, + status_phase text, + status_reason text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolumeclaim_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_storageclassname text, + spec_volumemode text, + spec_volumename text, + status_accessmodes text, + status_capacity text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + kind_status varchar(50), + 
metadata_uid varchar(40), + row_index int, + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_generatename text, + metadata_namespace text, + metadata_deletiontimestamp text, + metadata_deletiongraceperiodseconds text, + metadata_labels text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + spec_hostnetwork text, + spec_priorityclassname text, + spec_enableservicelinks text, + spec_priority text, + spec_schedulername text, + spec_hostpid text, + spec_nodename text, + spec_serviceaccount text, + spec_serviceaccountname text, + spec_dnspolicy text, + spec_terminationgraceperiodseconds text, + spec_restartpolicy text, + spec_securitycontext text, + spec_nodeselector_kubernetes_io_hostname text, + spec_tolerations text, + status_phase text, + status_hostip text, + status_podip text, + status_starttime text, + status_qosclass text, + status_reason text, + status_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_conditions ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + status_conditions_lastprobetime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containerstatuses ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_containerstatuses_name text, + status_containerstatuses_ready text, + 
status_containerstatuses_restartcount text, + status_containerstatuses_image text, + status_containerstatuses_imageid text, + status_containerstatuses_containerid text, + status_containerstatuses_state_terminated_exitcode text, + status_containerstatuses_state_terminated_reason text, + status_containerstatuses_state_terminated_startedat text, + status_containerstatuses_state_terminated_finishedat text, + status_containerstatuses_state_terminated_containerid text, + status_containerstatuses_state_waiting_reason text, + status_containerstatuses_state_waiting_message text, + status_containerstatuses_state_running_startedat text, + status_containerstatuses_laststate_terminated_exitcode text, + status_containerstatuses_laststate_terminated_reason text, + status_containerstatuses_laststate_terminated_startedat text, + status_containerstatuses_laststate_terminated_finishedat text, + status_containerstatuses_laststate_terminated_containerid text, + status_containerstatuses_laststate_waiting_reason text, + status_containerstatuses_laststate_waiting_message text, + status_containerstatuses_laststate_running_startedat text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containers ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_containers_name text, + spec_containers_image text, + spec_containers_env text, + spec_containers_resources_limits_cpu text, + spec_containers_resources_limits_memory text, + spec_containers_resources_requests_cpu text, + spec_containers_resources_requests_memory text, + spec_containers_volumemounts text, + spec_containers_securitycontext_privileged text, + spec_containers_command text, + spec_containers_ports text, + spec_containers_args text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); 
+---------------------------- +CREATE TABLE cmoa_pod_volume ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_volumes_name text, + spec_volumes_hostpath text, + spec_volumes_secret text, + spec_volumes_configmap text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_replicaset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_availablereplicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_hard text, + spec_scopes text, + status_hard text, + status_used text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_scopeselector ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_scopeselector_matchexpressions_operator text, + spec_scopeselector_matchexpressions_scopename text, + spec_scopeselector_matchexpressions_values text, + create_time timestamp 
default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_deletiongraceperiodseconds text, + metadata_deletiontimestamp text, + metadata_labels text, + metadata_namespace text, + spec_clusterip text, + spec_externalips text, + spec_selector text, + spec_type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_ports ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_ports_appprotocol text, + spec_ports_name text, + spec_ports_nodeport text, + spec_ports_port text, + spec_ports_protocol text, + spec_ports_targetport text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_statefulset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); + +CREATE TABLE public.api_error_history ( + id int8 NOT NULL, + 
api_msg varchar(255) NULL, + code varchar(255) NULL, + "exception" varchar(255) NULL, + http_error varchar(255) NULL, + http_status int4 NULL, + occureence_time varchar(255) NULL, + params varchar(255) NULL, + "path" varchar(255) NULL, + "type" varchar(255) NULL, + CONSTRAINT api_error_history_pkey PRIMARY KEY (id) +); + +CREATE TABLE public.metric_score ( + clst_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + metric_id varchar(255) NOT NULL, + sub_key varchar(255) NOT NULL, + unixtime int4 NOT NULL, + anomaly bool NOT NULL, + cont_name varchar(255) NULL, + "instance" varchar(255) NULL, + "namespace" varchar(255) NULL, + node_id varchar(255) NULL, + pod_id varchar(255) NULL, + score int4 NOT NULL, + yhat_lower_upper json NULL, + CONSTRAINT metric_score_pkey PRIMARY KEY (clst_id, entity_id, entity_type, metric_id, sub_key, unixtime) +); + + +CREATE TABLE public.tenant_info_auth_resources ( + tenant_info_id varchar(255) NOT NULL, + auth_resources_id int8 NOT NULL, + CONSTRAINT tenant_info_auth_resources_pkey PRIMARY KEY (tenant_info_id, auth_resources_id), + CONSTRAINT uk_7s6l8e2c8gli4js43c4xoifcl UNIQUE (auth_resources_id) +); + + +-- public.tenant_info_auth_resources foreign keys + +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkkecsc13ydhwg8u05aumkqbnx1 FOREIGN KEY (tenant_info_id) REFERENCES public.tenant_info(id); +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkpvvec4ju3hsma6s1rtgvr4mf6 FOREIGN KEY (auth_resources_id) REFERENCES public.auth_resource3(id); \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_dml.psql b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_dml.psql new file mode 100644 index 0000000..e6335f3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/03-ddl-dml/postgres/postgres_insert_dml.psql @@ -0,0 +1,2380 @@ +INSERT INTO public.tenant_info (id, 
name, in_used, created_date, modified_date, contract_id) VALUES ('DEFAULT_TENANT', 'admin', true, now(), now(), 0); + +INSERT INTO public.auth_resource2 (id, access_type, name, parent_id, type) VALUES (-1, 4, 'null', NULL, 'null'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Infrastructure', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Workloads', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Services', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Diagnosis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Statistics & Analysis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Reports', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Settings', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Hosts', -1, 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Dashboards', -1 , 'menu'); +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Health Check', -1, 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Namespace', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Nodes', (select id 
from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Node Details', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Usage', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Persistent Volume', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Pods', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Cron Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Deploy List', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Structure', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from 
auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Troubleshooting', (select id from auth_resource2 where type='menu' and name='Diagnosis') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Performance Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Job History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Log Viewer', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, 
parent_id, type) VALUES (4, 'Event Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert Analysis', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Container Life Cycle', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Traces', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Used Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'User & Group', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alerts', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'General', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT 
INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Metric Meta', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Notification', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Host Alerts', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'License', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Agent', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alias', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 
'Detail', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Group', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'CloudMOA - Nodes Resource', NULL, 'dashboard'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Detail', NULL, 'dashboard'); + +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES(4, 'Check Script', (select id from auth_resource2 where type='menu' and name='Health Check'), 'menu'); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards', false, null); +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Overview', false, 
null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Namespace', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Nodes', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Node Details', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Resource Usage', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Pods', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Jobs', false, null); +-- INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Cron Jobs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Deploy List', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Structure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis|Anomaly Score', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Performance Trends', false, 
null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Anomaly Score', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Job History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Log Viewer', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Event Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Container Life Cycle', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Service Traces', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|User & Group', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|General', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Metric Meta', false, null); +INSERT INTO 
public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Notification', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Host Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|License', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alias', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent Installation', false, NULL); + + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Group', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check|Check Script', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('userGroup|admin|default', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin|owner', false, 'DEFAULT_TENANT'); + +INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, 
user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('admin', NULL, true, NULL, 'admin', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin')); +INSERT INTO public.cloud_group (id, created_date, modified_date, name, description) VALUES ((select id from auth_resource3 where name='userGroup|admin|default'), now(), now(), 'default', '기본그룹정의'); + +--INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('owner', NULL, false, NULL, 'owner', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin|owner')); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +--INSERT INTO public.cloud_user_setting +--(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +--VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|CloudMOA - Nodes Resource', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|Service Detail', 
false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('cluster|cloudmoa', false, 'DEFAULT_TENANT'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (select id from auth_resource3 where name='menu|Infrastructure'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (select id from auth_resource3 where name='menu|Infrastructure|Topology'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (select id from auth_resource3 where name='menu|Infrastructure|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (select id from auth_resource3 where name='menu|Infrastructure|Resource Usage'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (select id from auth_resource3 where name='menu|Infrastructure|Namespace'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3); 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', 
(select id from auth_resource3 where name='menu|Services|Structure'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (30, 'Diagnosis', '05.Diagnosis', 4, NULL, (select id from auth_resource3 where name='menu|Diagnosis'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (31, 'Anomaly Score Detail', NULL, 0, 'anomalyScoreDiagnosis', (select id from auth_resource3 where name='menu|Diagnosis|Anomaly Score'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert 
Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where 
name='menu|Statistics & Analysis|Service Traces'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (62, 'Templates', NULL, 1, 'templateReport', (select id from auth_resource3 where name='menu|Reports|Templates'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2); + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (80, 'Hosts', '12.Hosts', 10, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (82, 'Overview', NULL, 1, 'overviewHost', 
(select id from auth_resource3 where name='menu|Hosts|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (90, 'Settings', '08.Setting', 99, NULL, (select id from auth_resource3 where name='menu|Settings'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', (select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (96, 'Metric Meta', NULL, 5, 
'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (99, 'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2); + +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0); + +--INSERT 
INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Resource Usage'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Namespace'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Nodes'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Node Details'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Deploy List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 
WHERE NAME = 'menu|Workloads|Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Cron Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Pods'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Structure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Detail'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis|Anomaly Score'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Statistics & Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Performance Trends'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Anomaly Score'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Job History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Log Viewer'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Event Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Container Life Cycle'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Service Traces'), 'owner'); +-- +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Templates'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Templates'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|User & Group'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alerts'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), 
now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Metric Meta'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|General'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Notification'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alias'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|License'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent Installation'), 'owner'); + +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cadvisor_version_info', 'cadvisor', 'A metric with a constant ''1'' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_periods_total', 'cadvisor', 'Number of elapsed enforcement period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_periods_total', 'cadvisor', 'Number of throttled period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_seconds_total', 'cadvisor', 'Total time duration the container has been throttled.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_load_average_10s', 'cadvisor', 'Value of container cpu load average over the last 10 seconds.', 'CPU', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_periods_total', 'cadvisor', 'Number of times processes of the cgroup have run on the cpu', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_seconds_total', 'cadvisor', 'Time duration the processes of the container have run on the CPU.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_runqueue_seconds_total', 'cadvisor', 'Time duration processes of the container have been waiting on a runqueue.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_system_seconds_total', 'cadvisor', 'Cumulative system cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_usage_seconds_total', 'cadvisor', 'Cumulative cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_user_seconds_total', 'cadvisor', 'Cumulative user cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_limit_bytes', 'cadvisor', 'Number of bytes that can be consumed by the container on this filesystem.', NULL, NULL, 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_last_seen', 'cadvisor', 'Last time a container was seen by the exporter', NULL, NULL, 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_bytes_total', 'cadvisor', 'Cumulative count of bytes received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while receiving', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_total', 'cadvisor', 'Cumulative count of packets received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_bytes_total', 'cadvisor', 'Cumulative count of bytes transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_errors_total', 'cadvisor', 'Cumulative count of errors encountered while transmitting', 'NIC', 'LOAD', 'Container', 
'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_total', 'cadvisor', 'Cumulative count of packets transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_scrape_error', 'cadvisor', '1 if there was an error while getting container metrics, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_period', 'cadvisor', 'CPU period of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_quota', 'cadvisor', 'CPU quota of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('container_memory_cache', 'cadvisor', 'Number of bytes of page cache memory.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failcnt', 'cadvisor', 'Number of memory usage hits limits', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failures_total', 'cadvisor', 'Cumulative count of memory allocation failures.', 'Memory', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_max_usage_bytes', 'cadvisor', 'Maximum memory usage recorded in bytes', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_rss', 'cadvisor', 'Size of RSS in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_swap', 'cadvisor', 'Container swap usage in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_usage_bytes', 'cadvisor', 'Current memory usage in bytes, including all memory regardless of when it was accessed', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_working_set_bytes', 'cadvisor', 'Current working set in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_tcp_usage_total', 'cadvisor', 'tcp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'tcp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_udp_usage_total', 'cadvisor', 'udp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'udp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_shares', 'cadvisor', 'CPU share of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_spec_memory_limit_bytes', 'cadvisor', 'Memory limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_swap_limit_bytes', 'cadvisor', 'Memory swap limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_start_time_seconds', 'cadvisor', 'Start time of the container since unix epoch in seconds.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_tasks_state', 'cadvisor', 'Number of tasks in given state', NULL, NULL, 'Container', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds', 'prometheus', 'The HTTP request latencies in microseconds.', NULL, 'DURATION', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_requests_total', 'prometheus', 'Total number of scrapes by HTTP status code.', NULL, 'ERROR', 'Node', 'counter', 'code,method', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_bytes_average', 'cloudwatch', 'Bytes read from all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds', 'micrometer', 'Server Response in second', NULL, 'RATE', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_count', 'micrometer', 'the total number of requests.', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_sum', 'micrometer', 'the total time taken to serve the requests', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_max', 'micrometer', 'the max number of requests.', NULL, 'RATE', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_bytes_average', 'cloudwatch', 'Bytes written to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', 
'2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_loaded', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_unloaded_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_live_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_max_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_allocated_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_promoted_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_count', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_max', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_sum', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_arp_entries', 'node_exporter', 'ARP entries by device', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_boot_time_seconds', 'node_exporter', 'Node boot time, in unixtime.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_context_switches_total', 'node_exporter', 'Total number of context switches.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_core_throttles_total', 'node_exporter', 'Number of times this cpu core has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'core', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_hertz', 'node_exporter', 'Current cpu thread frequency in hertz.', 'CPU', 'LOAD', 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_max_hertz', 'node_exporter', 'Maximum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_min_hertz', 'node_exporter', 'Minimum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_guest_seconds_total', 'node_exporter', 'Seconds the cpus spent in guests (VMs) for each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu', '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_package_throttles_total', 'node_exporter', 'Number of times this cpu package has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'package', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_seconds_total', 'node_exporter', 'Seconds the cpus spent in each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu,mode', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_entropy_available_bits', 'node_exporter', 'Bits of available entropy.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_exporter_build_info', 'node_exporter', 'A metric with a constant ''1'' value labeled by version, revision, branch, and goversion from which node_exporter was built.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_cpuutilization_average', 'cloudwatch', 'The percentage of allocated EC2 compute units that are currently in use on the instance.', 'CPU', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_ops_average', 'cloudwatch', 'Completed read operations from all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_ops_average', 'cloudwatch', 'Completed write operations to all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_bytes_average', 'cloudwatch', 'Bytes read from all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_bytes_average', 'cloudwatch', 'Bytes written to all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_in_average', 'cloudwatch', 'The number of bytes received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 
15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_out_average', 'cloudwatch', 'The number of bytes sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_readonly', 'node_exporter', 'Filesystem read-only status.', NULL, NULL, 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_in_average', 'cloudwatch', 'The number of packets received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_forks_total', 'node_exporter', 'Total number of forks.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_chip_names', 'node_exporter', 'Annotation metric for human-readable chip names', 'CPU', 'LOAD', 'Node', 'gauge', 'chip', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_hwmon_fan_rpm', 'node_exporter', 'Hardware monitor for fan revolutions per minute (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_pwm', 'node_exporter', 'Hardware monitor pwm element ', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_sensor_label', 'node_exporter', 'Label for given chip and sensor', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_celsius', 'node_exporter', 'Hardware monitor for temperature (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_alarm_celsius', 'node_exporter', 'Hardware monitor for temperature (crit_alarm)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_celsius', 'node_exporter', 'Hardware monitor for temperature (crit)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_max_celsius', 'node_exporter', 'Hardware monitor for temperature (max)', NULL, NULL, 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_intr_total', 'node_exporter', 'Total number of interrupts serviced.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_out_average', 'cloudwatch', 'The number of packets sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_ops_average', 'cloudwatch', 'Completed read operations from all Amazon EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_ops_average', 'cloudwatch', 'Completed write operations to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load1', 'node_exporter', '1m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load15', 'node_exporter', '15m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load5', 'node_exporter', '5m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_completed_total', 'node_exporter', 'The total number of reads completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_merged_total', 'node_exporter', 'The total number of reads merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_write_time_seconds_total', 'node_exporter', 'This is the total number of seconds spent by all writes.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_completed_total', 'node_exporter', 'The total number of writes completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_merged_total', 'node_exporter', 'The number of writes merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_written_bytes_total', 'node_exporter', 'The total number of bytes written successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries', 'node_exporter', 'Number of currently allocated flow entries for connection tracking.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries_limit', 'node_exporter', 'Maximum size of connection tracking table.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_scrape_collector_duration_seconds', 'node_exporter', 'node_exporter: Duration of a collector scrape.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_success', 'node_exporter', 'node_exporter: Whether a collector succeeded.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_textfile_scrape_error', 'node_exporter', '1 if there was an error opening or reading a file, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_time_seconds', 'node_exporter', 'System time in seconds since epoch (1970).', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_estimated_error_seconds', 'node_exporter', 'Estimated error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_frequency_adjustment_ratio', 'node_exporter', 'Local clock frequency adjustment.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_loop_time_constant', 'node_exporter', 'Phase-locked loop time constant.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_maxerror_seconds', 'node_exporter', 'Maximum error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_offset_seconds', 'node_exporter', 'Time offset in between local system and reference clock.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_calibration_total', 'node_exporter', 'Pulse per second count of calibration intervals.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_error_total', 'node_exporter', 'Pulse per second count of calibration errors.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_frequency_hertz', 'node_exporter', 'Pulse per second 
frequency.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_seconds', 'node_exporter', 'Pulse per second jitter.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_total', 'node_exporter', 'Pulse per second count of jitter limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_shift_seconds', 'node_exporter', 'Pulse per second interval duration.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_exceeded_total', 'node_exporter', 'Pulse per second count of stability limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_hertz', 'node_exporter', 'Pulse per second stability, average of recent frequency changes.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, 
diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_status', 'node_exporter', 'Value of the status array bits.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_sync_status', 'node_exporter', 'Is clock synchronized to a reliable server (1 = yes, 0 = no).', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tai_offset_seconds', 'node_exporter', 'International Atomic Time (TAI) offset.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tick_seconds', 'node_exporter', 'Seconds between clock ticks.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_uname_info', 'node_exporter', 'Labeled system information as provided by the uname system call.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_oom_kill', 'node_exporter', '/proc/vmstat information field oom_kill.', NULL, 'ERROR', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_cpu_usage', 'micrometer', 'The "recent cpu usage" for the Java Virtual Machine process', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_uptime_seconds', 'micrometer', 'Process uptime in seconds.', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_count', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_max', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_sum', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_cpu_usage', 'micrometer', 'The "recent cpu usage" for the whole system', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_load_average_1m', 'micrometer', 'The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('up', 'prometheus', '1 if the instance is healthy, i.e. 
reachable, or 0 if the scrape failed.', NULL, 'ERROR', 'Any', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('go_threads', 'prometheus', 'Number of OS threads created.', 'Thread', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes', 'prometheus', 'The HTTP request sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes', 'prometheus', 'The HTTP response sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_count', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_total_capacity_bytes', 
'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_committed_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_max_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_daemon', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_live', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_peak', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_now', 'node_exporter', 'The number of I/Os currently in progress.', 'Disk', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_seconds_total', 'node_exporter', 'Total seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_weighted_seconds_total', 'node_exporter', 'The weighted # of seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_bytes_total', 'node_exporter', 'The total number of bytes read successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_time_seconds_total', 'node_exporter', 'The total number of seconds spent by all reads.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_filefd_allocated', 'node_exporter', 'File descriptor statistics: allocated.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_maximum', 'node_exporter', 'File descriptor statistics: maximum.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_connections_total', 'node_exporter', 'The total number of connections made.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_bytes_total', 'node_exporter', 'The total amount of incoming data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_packets_total', 'node_exporter', 'The total number of incoming packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_bytes_total', 'node_exporter', 'The total amount of outgoing data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_packets_total', 'node_exporter', 'The total number of outgoing packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_anon_bytes', 'node_exporter', 'Memory information field Active_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_bytes', 'node_exporter', 'Memory information field Active_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_file_bytes', 'node_exporter', 'Memory information field Active_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonHugePages_bytes', 'node_exporter', 'Memory information field AnonHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonPages_bytes', 'node_exporter', 'Memory information 
field AnonPages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Bounce_bytes', 'node_exporter', 'Memory information field Bounce_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Buffers_bytes', 'node_exporter', 'Memory information field Buffers_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Cached_bytes', 'node_exporter', 'Memory information field Cached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaFree_bytes', 'node_exporter', 'Memory information field CmaFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaTotal_bytes', 'node_exporter', 'Memory information field CmaTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_memory_CommitLimit_bytes', 'node_exporter', 'Memory information field CommitLimit_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Committed_AS_bytes', 'node_exporter', 'Memory information field Committed_AS_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap1G_bytes', 'node_exporter', 'Memory information field DirectMap1G_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap2M_bytes', 'node_exporter', 'Memory information field DirectMap2M_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap4k_bytes', 'node_exporter', 'Memory information field DirectMap4k_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Dirty_bytes', 'node_exporter', 'Memory information field Dirty_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, 
'2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HardwareCorrupted_bytes', 'node_exporter', 'Memory information field HardwareCorrupted_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Free', 'node_exporter', 'Memory information field HugePages_Free.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Rsvd', 'node_exporter', 'Memory information field HugePages_Rsvd.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Surp', 'node_exporter', 'Memory information field HugePages_Surp.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Total', 'node_exporter', 'Memory information field HugePages_Total.', 'Memory', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_memory_Hugepagesize_bytes', 'node_exporter', 'Memory information field Hugepagesize_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_anon_bytes', 'node_exporter', 'Memory information field Inactive_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_bytes', 'node_exporter', 'Memory information field Inactive_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_file_bytes', 'node_exporter', 'Memory information field Inactive_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_KernelStack_bytes', 'node_exporter', 'Memory information field KernelStack_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mapped_bytes', 'node_exporter', 'Memory information field Mapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemAvailable_bytes', 'node_exporter', 'Memory information field MemAvailable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemFree_bytes', 'node_exporter', 'Memory information field MemFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemTotal_bytes', 'node_exporter', 'Memory information field MemTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mlocked_bytes', 'node_exporter', 'Memory information field Mlocked_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_NFS_Unstable_bytes', 'node_exporter', 'Memory information field NFS_Unstable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_memory_PageTables_bytes', 'node_exporter', 'Memory information field PageTables_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Shmem_bytes', 'node_exporter', 'Memory information field Shmem_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemHugePages_bytes', 'node_exporter', 'Memory information field ShmemHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemPmdMapped_bytes', 'node_exporter', 'Memory information field ShmemPmdMapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Slab_bytes', 'node_exporter', 'Memory information field Slab_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SReclaimable_bytes', 'node_exporter', 'Memory information field SReclaimable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SUnreclaim_bytes', 'node_exporter', 'Memory information field SUnreclaim_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapCached_bytes', 'node_exporter', 'Memory information field SwapCached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapFree_bytes', 'node_exporter', 'Memory information field SwapFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapTotal_bytes', 'node_exporter', 'Memory information field SwapTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Unevictable_bytes', 'node_exporter', 'Memory information field Unevictable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocChunk_bytes', 'node_exporter', 
'Memory information field VmallocChunk_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocTotal_bytes', 'node_exporter', 'Memory information field VmallocTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocUsed_bytes', 'node_exporter', 'Memory information field VmallocUsed_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Writeback_bytes', 'node_exporter', 'Memory information field Writeback_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_WritebackTmp_bytes', 'node_exporter', 'Memory information field WritebackTmp_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InErrors', 'node_exporter', 'Statistic IcmpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InMsgs', 'node_exporter', 'Statistic IcmpInMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_OutMsgs', 'node_exporter', 'Statistic IcmpOutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InErrors', 'node_exporter', 'Statistic Icmp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InMsgs', 'node_exporter', 'Statistic Icmp6InMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_OutMsgs', 'node_exporter', 'Statistic Icmp6OutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip_Forwarding', 'node_exporter', 'Statistic IpForwarding.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_InOctets', 'node_exporter', 'Statistic Ip6InOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_OutOctets', 'node_exporter', 'Statistic Ip6OutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_InOctets', 'node_exporter', 'Statistic IpExtInOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_OutOctets', 'node_exporter', 'Statistic IpExtOutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_ActiveOpens', 'node_exporter', 'Statistic TcpActiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_CurrEstab', 'node_exporter', 'Statistic TcpCurrEstab.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_InErrs', 'node_exporter', 'Statistic TcpInErrs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_PassiveOpens', 'node_exporter', 'Statistic TcpPassiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_RetransSegs', 'node_exporter', 'Statistic TcpRetransSegs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenDrops', 'node_exporter', 'Statistic TcpExtListenDrops.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenOverflows', 'node_exporter', 'Statistic TcpExtListenOverflows.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesFailed', 
'node_exporter', 'Statistic TcpExtSyncookiesFailed.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesRecv', 'node_exporter', 'Statistic TcpExtSyncookiesRecv.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesSent', 'node_exporter', 'Statistic TcpExtSyncookiesSent.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InDatagrams', 'node_exporter', 'Statistic UdpInDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InErrors', 'node_exporter', 'Statistic UdpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_NoPorts', 'node_exporter', 'Statistic UdpNoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_OutDatagrams', 'node_exporter', 'Statistic UdpOutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InDatagrams', 'node_exporter', 'Statistic Udp6InDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InErrors', 'node_exporter', 'Statistic Udp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_NoPorts', 'node_exporter', 'Statistic Udp6NoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_OutDatagrams', 'node_exporter', 'Statistic Udp6OutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite_InErrors', 'node_exporter', 'Statistic UdpLiteInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite6_InErrors', 'node_exporter', 'Statistic UdpLite6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_bytes_total', 'node_exporter', 'Network device statistic receive_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_compressed_total', 'node_exporter', 'Network device statistic receive_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_drop_total', 'node_exporter', 'Network device statistic receive_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_errs_total', 'node_exporter', 'Network device statistic receive_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_fifo_total', 
'node_exporter', 'Network device statistic receive_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_frame_total', 'node_exporter', 'Network device statistic receive_frame.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_multicast_total', 'node_exporter', 'Network device statistic receive_multicast.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_packets_total', 'node_exporter', 'Network device statistic receive_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_bytes_total', 'node_exporter', 'Network device statistic transmit_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_carrier_total', 'node_exporter', 'Network device statistic transmit_carrier.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_colls_total', 'node_exporter', 'Network device statistic transmit_colls.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_compressed_total', 'node_exporter', 'Network device statistic transmit_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_drop_total', 'node_exporter', 'Network device statistic transmit_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_errs_total', 'node_exporter', 'Network device statistic transmit_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_fifo_total', 'node_exporter', 'Network device statistic transmit_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('node_network_transmit_packets_total', 'node_exporter', 'Network device statistic transmit_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_blocked', 'node_exporter', 'Number of processes blocked waiting for I/O to complete.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_running', 'node_exporter', 'Number of processes in runnable state.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_inuse', 'node_exporter', 'Number of FRAG sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_memory', 'node_exporter', 'Number of FRAG sockets in state memory.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_RAW_inuse', 'node_exporter', 'Number of RAW sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_sockets_used', 'node_exporter', 'Number of sockets sockets in state used.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_alloc', 'node_exporter', 'Number of TCP sockets in state alloc.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_inuse', 'node_exporter', 'Number of TCP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem', 'node_exporter', 'Number of TCP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem_bytes', 'node_exporter', 'Number of TCP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_orphan', 'node_exporter', 
'Number of TCP sockets in state orphan.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_tw', 'node_exporter', 'Number of TCP sockets in state tw.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_inuse', 'node_exporter', 'Number of UDP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem', 'node_exporter', 'Number of UDP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem_bytes', 'node_exporter', 'Number of UDP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDPLITE_inuse', 'node_exporter', 'Number of UDPLITE sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_tcp_connection_states', 'node_exporter', 'Number of connection states.', 'Network', 'LOAD', 'Node', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgfault', 'node_exporter', '/proc/vmstat information field pgfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgmajfault', 'node_exporter', '/proc/vmstat information field pgmajfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgin', 'node_exporter', '/proc/vmstat information field pgpgin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgout', 'node_exporter', '/proc/vmstat information field pgpgout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpin', 'node_exporter', '/proc/vmstat information field pswpin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT 
INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpout', 'node_exporter', '/proc/vmstat information field pswpout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_files_open', 'micrometer', 'The open file descriptor count', 'File', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_open_fds', 'micrometer', 'Number of open file descriptors.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_resident_memory_bytes', 'micrometer', 'Resident memory size in bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_virtual_memory_bytes', 'micrometer', '-', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_free', 'cadvisor', 'Number of available Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_total', 'cadvisor', 'Number of Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_current', 'cadvisor', 'Number of I/Os currently in progress', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_seconds_total', 'cadvisor', 'Cumulative count of seconds spent doing I/Os', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_weighted_seconds_total', 'cadvisor', 'Cumulative weighted I/O time in seconds', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_read_seconds_total', 'cadvisor', 'Cumulative count of seconds spent reading', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('container_fs_reads_bytes_total', 'cadvisor', 'Cumulative count of bytes read', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_merged_total', 'cadvisor', 'Cumulative count of reads merged', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_total', 'cadvisor', 'Cumulative count of reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_reads_total', 'cadvisor', 'Cumulative count of sector reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_writes_total', 'cadvisor', 'Cumulative count of sector writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_usage_bytes', 'cadvisor', 'Number of bytes that are consumed by the container on this filesystem.', 'Filesystem', 'LOAD', 'Container', 'gauge', 
'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_write_seconds_total', 'cadvisor', 'Cumulative count of seconds spent writing', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_bytes_total', 'cadvisor', 'Cumulative count of bytes written', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_total', 'cadvisor', 'Cumulative count of writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_avail_bytes', 'node_exporter', 'Filesystem space available to non-root users in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_device_error', 'node_exporter', 'Whether an error occurred while getting statistics for the given device.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files', 'node_exporter', 'Filesystem total file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files_free', 'node_exporter', 'Filesystem total free file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_free_bytes', 'node_exporter', 'Filesystem free space in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_size_bytes', 'node_exporter', 'Filesystem size in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hitrate', 'cassandra_exporter', 'All time cache hit rate', 'Cache', 'LOAD', 'Cassandra', 'gauge', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hits_count', 'cassandra_exporter', 'Total number of cache hits', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', 
'2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_requests_count', 'cassandra_exporter', 'Total number of cache requests', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_client_connectednativeclients', 'cassandra_exporter', 'Number of clients connected to this nodes native protocol server', 'Connection', 'LOAD', 'Cassandra', 'gauge', NULL, '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_failures_count', 'cassandra_exporter', 'Number of transaction failures encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_latency_seconds_count', 'cassandra_exporter', 'Number of client requests latency seconds', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_timeouts_count', 'cassandra_exporter', 'Number of timeouts encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_unavailables_count', 'cassandra_exporter', 'Number of unavailable exceptions encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_completedtasks', 'cassandra_exporter', 'Total number of commit log messages written', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_totalcommitlogsize', 'cassandra_exporter', 'Current size, in bytes, used by all the commit log segments', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds', 'cassandra_exporter', 'Local range scan latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds_count', 'cassandra_exporter', 'Local range scan count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('cassandra_keyspace_readlatency_seconds', 'cassandra_exporter', 'Local read latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds_count', 'cassandra_exporter', 'Local read count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused', 'cassandra_exporter', 'Total disk space used belonging to this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds', 'cassandra_exporter', 'Local write latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds_count', 'cassandra_exporter', 'Local write count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_activetasks', 'cassandra_exporter', 'Number of tasks being actively worked on', 'Task', 'LOAD', 
'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_completedtasks', 'cassandra_exporter', 'Number of tasks completed', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_pendingtasks', 'cassandra_exporter', 'Number of queued tasks queued up', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_totalblockedtasks_count', 'cassandra_exporter', 'Number of tasks that were blocked due to queue saturation', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cloudwatch_requests_total', 'cloudwatch', 'API requests made to CloudWatch', 'API', 'LOAD', 'AWS/Usage', 'counter', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_count', 'imxc_api_server', 'the number of error counts in 5s', NULL, 'ERROR', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_total', 'imxc_api_server', 'the total number of errors', NULL, 'ERROR', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_request_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_count', 'imxc_api_server', 'the number of requests counts in 5s', NULL, 'LOAD', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'gauge', 'protocol', '2019-12-10 11:22:00', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_total', 'imxc_api_server', 'the total number of requests', NULL, 'LOAD', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_connections', 'mongodb_exporter', 'The number of incoming connections from clients to the database server', 'Connection', 'LOAD', 'MongoDB', 'gauge', 'state', '2019-12-04 
16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_client', 'mongodb_exporter', 'The number of the active client connections performing read or write operations', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_current_queue', 'mongodb_exporter', 'The number of operations that are currently queued and waiting for the read or write lock', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_instance_uptime_seconds', 'mongodb_exporter', 'The number of seconds that the current MongoDB process has been active', 'Server', 'DURATION', 'MongoDB', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_memory', 'mongodb_exporter', 'The amount of memory, in mebibyte (MiB), currently used by the database process', 'Memory', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_metrics_document_total', 'mongodb_exporter', 'The total number of documents processed', 'Row', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_network_bytes_total', 'mongodb_exporter', 'The number of bytes that reflects the amount of network traffic', 'Network', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_op_counters_total', 'mongodb_exporter', 'The total number of operations since the mongod instance last started', 'Request', 'LOAD', 'MongoDB', 'counter', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_aborted_connects', 'mysqld_exporter', 'The number of failed attempts to connect to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_received', 'mysqld_exporter', 'The number of bytes received from all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_sent', 'mysqld_exporter', 'The number of bytes sent to all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_commands_total', 
'mysqld_exporter', 'The number of times each XXX command has been executed', 'Request', 'LOAD', 'MySQL', 'counter', 'command', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_connections', 'mysqld_exporter', 'The number of connection attempts (successful or not) to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests', 'mysqld_exporter', 'The number of logical read requests', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests', 'mysqld_exporter', 'The number of writes done to the InnoDB buffer pool', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_read', 'mysqld_exporter', 'The amount of data read since the server was started (in bytes)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_reads', 'mysqld_exporter', 'The total number of data reads (OS file reads)', 'Disk', 'LOAD', 'MySQL', 
'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_writes', 'mysqld_exporter', 'The total number of data writes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_written', 'mysqld_exporter', 'The amount of data written so far, in bytes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_write_requests', 'mysqld_exporter', 'The number of write requests for the InnoDB redo log', 'Log', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_writes', 'mysqld_exporter', 'The number of physical writes to the InnoDB redo log file', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_os_log_written', 'mysqld_exporter', 'The number of bytes written to the InnoDB redo log files', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits', 'mysqld_exporter', 'The number of row locks currently being waited for by operations on InnoDB tables', 'Lock', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_time', 'mysqld_exporter', 'The total time spent in acquiring row locks for InnoDB tables, in milliseconds', 'Lock', 'DURATION', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits', 'mysqld_exporter', 'The number of times operations on InnoDB tables had to wait for a row lock', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_ops_total', 'mysqld_exporter', 'The number of rows operated in InnoDB tables', 'Row', 'LOAD', 'MySQL', 'counter', 'operation', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_table_locks_immediate', 'mysqld_exporter', 'The number of times that a request for a table lock could be granted immediately', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('mysql_global_status_threads_connected', 'mysqld_exporter', 'The number of currently open connections', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_running', 'mysqld_exporter', 'The number of threads that are not sleeping', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_uptime', 'mysqld_exporter', 'The number of seconds that the server has been up', 'Server', 'DURATION', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_up', 'mysqld_exporter', 'Whether the last scrape of metrics from MySQL was able to connect to the server', 'NULL', 'ERROR', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_locks_count', 'postgres_exporter', 'Number of locks', 'Lock', 'LOAD', 'PostgreSQL', 'gauge', 'datname,mode', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_activity_count', 'postgres_exporter', 'number of connections in this state', 'Connection', 'LOAD', 'PostgreSQL', 'gauge', 'datname,state', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_read_time', 'postgres_exporter', 'Time spent reading data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_write_time', 'postgres_exporter', 'Time spent writing data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_hit', 'postgres_exporter', 'Number of times disk blocks were found already in the buffer cache', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_read', 'postgres_exporter', 'Number of disk blocks read in this database', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_bytes', 'postgres_exporter', 'Total amount of data written to temporary files by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_files', 'postgres_exporter', 'Number of temporary files created by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_deleted', 'postgres_exporter', 'Number of rows deleted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_fetched', 'postgres_exporter', 'Number of rows fetched by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_inserted', 'postgres_exporter', 'Number of rows inserted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_returned', 'postgres_exporter', 'Number of rows returned by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('pg_stat_database_tup_updated', 'postgres_exporter', 'Number of rows updated by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_commit', 'postgres_exporter', 'Number of transactions in this database that have been committed', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_rollback', 'postgres_exporter', 'Number of transactions in this database that have been rolled back', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_up', 'postgres_exporter', 'Whether the last scrape of metrics from PostgreSQL was able to connect to the server', 'NULL', 'ERROR', 'PostgreSQL', 'gauge', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816000, '2019-08-19 06:14:22.616', '2019-08-19 06:14:22.616', false, 4, (select id from auth_resource2 where type='menu' and name='Infrastructure' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816001, '2019-08-19 06:14:22.635', '2019-08-19 06:14:22.635', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and 
name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816002, '2019-08-19 06:14:22.638', '2019-08-19 06:14:22.638', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816003, '2019-08-19 06:14:22.64', '2019-08-19 06:14:22.64', false, 4, (select id from auth_resource2 where type='menu' and name='Namespace' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816004, '2019-08-19 06:14:22.643', '2019-08-19 06:14:22.643', false, 4, (select id from auth_resource2 where type='menu' and name='Nodes' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816005, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Node Details' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816006, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Resource Usage' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816009, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Persistent Volume' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816100, 
'2019-08-19 06:14:22.619', '2019-08-19 06:14:22.619', false, 4, (select id from auth_resource2 where type='menu' and name='Workloads' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816105, '2019-08-19 06:14:22.657', '2019-08-19 06:14:22.657', false, 4, (select id from auth_resource2 where type='menu' and name='Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816106, '2019-08-19 06:14:22.66', '2019-08-19 06:14:22.66', false, 4, (select id from auth_resource2 where type='menu' and name='Cron Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816107, '2019-08-19 06:14:22.646', '2019-08-19 06:14:22.646', false, 4, (select id from auth_resource2 where type='menu' and name='Pods' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816200, '2019-08-19 06:14:22.621', '2019-08-19 06:14:22.621', false, 4, (select id from auth_resource2 where type='menu' and name='Services' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816201, '2019-08-19 06:14:22.698', '2019-08-19 06:14:22.698', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816202, '2019-08-19 06:14:22.728', '2019-08-19 06:14:22.728', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where 
type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816203, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816300, '2019-08-19 06:14:22.624', '2019-08-19 06:14:22.624', false, 4, (select id from auth_resource2 where type='menu' and name='Diagnosis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816301, '2019-08-19 06:14:22.705', '2019-08-19 06:14:22.705', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Diagnosis') ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816309, '2019-08-19 06:14:22.668', '2019-08-19 06:14:22.668', false, 4, (select id from auth_resource2 where type='menu' and name='Troubleshooting' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816400, '2019-08-19 06:14:22.627', '2019-08-19 06:14:22.627', false, 4, (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816401, '2019-08-19 06:14:22.671', '2019-08-19 06:14:22.671', false, 4, (select id from auth_resource2 where type='menu' and name='Performance Trends' ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816402, '2019-08-19 06:14:22.731', '2019-08-19 06:14:22.731', false, 4, (select id from auth_resource2 where type='menu' and name='Alert Analysis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816403, '2019-08-19 06:14:22.674', '2019-08-19 06:14:22.674', false, 4, (select id from auth_resource2 where type='menu' and name='Alert History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816404, '2019-08-19 06:14:22.677', '2019-08-19 06:14:22.677', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816405, '2019-08-19 06:14:22.679', '2019-08-19 06:14:22.679', false, 4, (select id from auth_resource2 where type='menu' and name='Job History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816406, '2019-08-19 06:14:22.685', '2019-08-19 06:14:22.685', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816407, '2019-08-19 06:14:22.682', '2019-08-19 06:14:22.682', false, 4, (select id from auth_resource2 where type='menu' and name='Log Viewer' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, 
modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816408, '2019-08-19 06:14:22.725', '2019-08-19 06:14:22.725', false, 4, (select id from auth_resource2 where type='menu' and name='Event Logs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816409, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Container Life Cycle' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816500, '2019-08-19 06:14:22.629', '2019-08-19 06:14:22.629', false, 4, (select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816501, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816502, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816550, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Dashboards' ) , 
'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816551, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816552, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816700, '2019-08-19 06:14:22.632', '2019-08-19 06:14:22.632', false, 4, (select id from auth_resource2 where type='menu' and name='Settings' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816701, '2019-08-19 06:14:22.687', '2019-08-19 06:14:22.687', false, 4, (select id from auth_resource2 where type='menu' and name='User & Group' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816702, '2019-08-19 06:14:22.69', '2019-08-19 06:14:22.69', false, 4, (select id from auth_resource2 where type='menu' and name='Alert' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816703, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Host Alerts' ) , 'admin'); +-- INSERT INTO public.user_permission2 
(id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816704, '2019-08-19 06:14:22.693', '2019-08-19 06:14:22.693', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Settings' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816706, '2019-08-19 06:14:22.717', '2019-08-19 06:14:22.717', false, 4, (select id from auth_resource2 where type='menu' and name='Metric Meta' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816707, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='Notification' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816708, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='General' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816709, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='License' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816800, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Hosts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816801, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from 
auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816802, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816803, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='List' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816804, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816805, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Group' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); + + + + +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (97, '2019-04-02 18:07:31.319', '2019-04-02 18:07:31.319', 'NODE CPU 사용', '(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m])) * 100))', 
'Node CPU Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id }} CPU 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (1, '2019-04-15 02:26:13.826', '2019-04-15 02:26:24.02', 'NODE Disk 사용', '(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', {filter} }))) * 100', 'Node Disk Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Disk 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (119, '2019-04-02 18:08:50.17', '2019-04-02 18:08:50.17', 'NODE Memory 사용', '(1- ((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node''})) * 100', 'Node Memory Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Memory 사용률이 {threshold}%를 초과했습니다. 현재값 : {{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (2, '2019-04-15 05:27:56.544', '2019-04-15 05:27:59.924', 'Container CPU 사용', 'sum (rate (container_cpu_usage_seconds_total{ {filter} }[1m])) by (xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id) * 100', 'Container CPU Usage', 'controller', 'Cluster:{{$labels.xm_clst_id }} POD:{{$labels.xm_pod_id}} CPU 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_user','Container CPU User (%)','Container CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_working_set_bytes','Container Memory Working Set (GiB)','Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_working_set_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_io_seconds','Host io Disk seconds','Host disk io seconds','sum by (instance) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Disk IO Seconds:{{humanize $value}}|{threshold}.','2020-03-23 04:08:37.359','2020-03-23 04:08:37.359'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_write_byte','host disk R/W byte','host disk R/W byte','sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Read/Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2020-03-24 05:21:53.915','2020-03-24 05:24:52.674'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_free','Host Memory Free (GiB)','Memory information field MemFree_bytes','(node_memory_MemAvailable_bytes{{filter}} or (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:18.977','2020-03-23 04:08:18.977'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_sent','Number of Bytes Sent','The number of bytes sent to all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_sent[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Sent:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_namespace','Containe memory sum by namespace','Containe memory sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','memory','Namespace',NULL,false,false,'Container memory sum by namespace','2020-07-03 04:31:10.079','2020-07-03 08:38:17.034'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_count','Node Count','node count','count by(xm_clst_id, xm_namespace,xm_node_id) (up{{filter}})','Node','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} NODE:{{$labels.xm_node_id}} Node Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_restart_count','Container Restart Count','container restart count group by namespace','sum by(xm_clst_id, xm_namespace, pod_name ) (increase(imxc_kubernetes_container_restart_count{{filter}}[10s]))','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container Restart Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_usage','Node CPU Usage (%)','NODE CPU Usage','(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0) * 100)))','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency_device','Node Disk Read Latency per Device (ms)','Node Disk Read Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage_per_device','Node Filesystem Usage per device (%)','NODE Filesystem Usage per Device','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_memory_usage','Node Memory Usage (%)','Node Memory Usage','sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_tablespace_size','Tablespace Size (GiB)','Generic counter metric of tablespaces bytes in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, tablespace, type) (oracledb_tablespace_bytes) / 1073741824','Tablespace','OracleDB','tablespace, type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Tablespace Size:{{humanize $value}}GiB|{threshold}GiB.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_allocated_size','Allocated Memory (MiB)','The total amount of memory that the Redis allocator allocated','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_allocated_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Allocated Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_kubernetes_event_count','Cluster events count','Kubernetes Namespace Events count','sum by (xm_clst_id, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Event Count:{{humanize $value}}|{threshold}.','2019-09-26 05:33:37.000','2020-04-27 05:38:47.804'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_limit','cluster_memory_limit (Gib)','Total container limit size in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Limits:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_total_count','Cluster Pod Total Count','Cluster Pod Total Count','sum by (xm_clst_id) (imxc_kubernetes_controller_counts{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Total Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_free','Host Swap Memory Free','Host Swap Free','node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:24.594','2020-03-23 04:08:24.594'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_context_switch_count','Host Context','Total number of context switches.','sum by (instance) (node_context_switches_total{{filter}})','CPU','Host',NULL,false,false,'None','2020-03-23 04:08:15.000','2020-03-23 04:08:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_used','Host system Filesystem used','Host File system used','sum by (instance) (node_filesystem_size_bytes{{filter}}-node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:30.407','2020-03-23 04:08:30.407'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_io','Node Disk I/O','Total seconds spent doing I/Os','avg by (xm_clst_id, xm_node_id) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:55.992'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage','Container Filesystem Usage (%)','Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_reads','Container Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_namespace','Container cpu sum by namespace','Container cpu sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Namespace',NULL,false,false,'.','2020-05-30 08:30:10.158','2020-06-09 02:00:50.856'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size','Node Filesystem Available Size (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', 
{filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_running_count','Node Pod Running Count','Node Pod Running Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Running Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-06 08:02:40.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_user','Pod CPU User (%)','Pod CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_reads','Pod Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Read Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_max_usage_bytes','Pod Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_max_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Max Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_receive','Pod Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Receive:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hits_count','Total number of cache hits (count/s)','Total number of cache hits','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_hits_count{{filter}}[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Counts per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:24:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_clientrequest_failures_count','Number of transaction failures encountered','Number of transaction failures encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_failures_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Failure Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_connections_and_tasks','Cassandra connections & tasks','cassandra connections & tasks','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "data_type", "Active tasks", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "data_type", "Pending tasks", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "data_type", "Client connections", "", "") )','Connection','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Connections and Tasks:{{humanize $value}}|{threshold}.','2020-01-02 09:11:48.000','2020-02-13 01:24:51.522'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_transmit','Pod Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Transmit:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 
03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_request','cluster_memory_request (Gib)','Total container memory request in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_count','Local read count (count/s)','Local read count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_readlatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_cpu{{filter}})','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Cluster CPU Capacity Cores:{{humanize $value}}|{threshold}.','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_alerts_received_count','Cluster alerts received count','Alert count by cluster','sum by (xm_clst_id, level) (ceil(increase(imxc_alerts_received_count_total{status=''firing'', 
{filter}}[10m])))','Alert','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Alert Received Counts:{{humanize $value}}|{threshold}.','2019-08-23 04:41:49.000','2020-04-28 08:09:09.429'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_throttled_time','Container CPU Throttled Time','container cpu_throttled time','sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) (increase(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="", {filter}}[10s]))','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hitrate','All time cache hit rate','All time cache hit rate','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (cassandra_cache_hitrate {{filter}} * 100)','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-12-13 01:19:54.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_read_bytes','Bytes Read from All Instance Store Volumes (KiB)','Bytes read from all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_read_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_write_bytes','Bytes Written to All Instance Store Volumes (KiB)','Bytes written to all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_write_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebswrite_bytes','Bytes written to all EBS volumes (KiB)','Bytes written to all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebswrite_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_requests_count','Total number of cache requests (count/s)','Total number of cache requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_requests_count[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_keyspace_write_latency','Local write latency (ms)','Local write latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_usage','Cluster Memory Usage (%)','All Nodes Memory Usage in cluster.','(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100','Memory','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-07-18 06:12:22.000','2020-04-22 04:59:14.251'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections_metrics_created_total','Incoming Connections Created','Count of all incoming connections created to the server (This number includes connections that have since closed)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_connections_metrics_created_total[1m]))','Connection','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Incoming Connections Created Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_disk_io','MySQL Disk I/O','MySQL Disk I/O','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_data_read[1m]), "data_type", "read", "", "") or +label_replace(rate(mysql_global_status_innodb_data_written[1m]), "data_type", "written", "", ""))','Disk','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} SVC:{{$labels.xm_service_name}} Mysql Disk IO:{{humanize $value}}|{threshold}.','2019-12-05 08:48:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_capacity_count','Cluster Pod Capacity Count','Cluster Pod Capacity Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Capacity Pod Counts:{{humanize $value}}|{threshold}.','2019-08-27 04:45:52.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_kubernetes_event_count','Namespace events count','Kubernetes Namespace Events count','sum by (xm_clst_id, xm_namespace, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Events:{{humanize $value}}|{threshold}.','2019-09-24 06:42:09.000','2019-09-24 06:42:34.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_capacity_cores','node_cpu_capacity_cores','node_cpu_capacity_cores','imxc_kubernetes_node_resource_capacity_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_allocatable_cores','node_cpu_allocatable_cores','node_cpu_allocatable_cores','imxc_kubernetes_node_resource_allocatable_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_capacity_count','Node Pod Capacity Count','Node Pod Capacity Count','imxc_kubernetes_node_resource_capacity_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Capacity Count of Pods:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_allocatable','node_memory_allocatable (Gib)','imxc_kubernetes_node_resource_allocatable_memory in GiB','imxc_kubernetes_node_resource_allocatable_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_limit','node_memory_limit (Gib)','Total container memory limit for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 
1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_readwritelatency_seconds','Cassandra Read/Write Latency (ms)','Cassandra Read/Write Latency (ms)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) or (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Keyspace Readwritelatency Seconds:{{humanize $value}}ms|{threshold}ms.','2019-10-23 01:46:07.000','2019-11-05 09:03:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_usage','Cluster CPU Usage (%)','All Nodes CPU Usage in cluster.','(100 - (avg by (xm_clst_id)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0)) * 100))','CPU','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-07-18 05:54:39.000','2020-04-22 04:59:14.253'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_received','Number of Bytes Received','The number of bytes received from all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_received[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Received:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 
16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_request','node_memory_request (Gib)','Total container memory request in GiB for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_tasks','Number of tasks','Number of tasks','sum by (task_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "task_type", "active", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "task_type", "pending", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "task_type", "connected", "", "") )','Task','Cassandra','task_type',true,false,'Number of tasks','2019-10-24 01:34:25.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_latency_seconds','Local latency seconds','Local latency seconds','sum by(type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(cassandra_keyspace_readlatency_seconds{quantile=''0.99'', {filter}}, "type", "read", "", "") or +label_replace(cassandra_keyspace_writelatency_seconds{quantile=''0.99'', {filter}}, "type", "write", "", "")) * 1000','Disk','Cassandra',NULL,true,true,'Local latency seconds','2019-10-24 02:14:45.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_concurrency','Wait-Time - Concurrency','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_concurrency[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Concurrency:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_pendingtasks','Number of queued tasks queued up','Number of queued tasks queued up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_pendingtasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Active Task:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_ready_count','Cluster Pod Ready Count','Cluster Pod Ready Count','sum by (xm_clst_id) (imxc_kubernetes_controller_ready{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Ready Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_allocatable_count','Node Pod Allocatable Count','Node Pod Allocatable Count','imxc_kubernetes_node_resource_allocatable_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} 
Allocatable Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_conatiner_count','Container Type Sparselog Count','Container-type sparse log count by xm_clst_id, xm_namespace, xm_node_id, xm_pod_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_namespace, xm_node_id, xm_pod_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Pod",{filter}}[1m])))','SparseLog','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_connected','Number of Open Connections','The number of currently open connections','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_connected)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Open Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebsread_bytes','Bytes read from all EBS volumes (KiB)','Bytes read from all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebsread_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 
17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_cpu_usage','Namespace CPU Usage (%)','CPU Usage by namespace','sum by (xm_clst_id,xm_entity_type,xm_namespace) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'', {filter}}[1m])) * 100','CPU','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 01:06:05.000','2019-08-23 01:06:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_memory_usage','Namespace memory usage (Gib)','Memory usage by namespace in bytes / 1073741824','sum by (xm_clst_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'', {filter}}) / 1073741824','Memory','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 01:21:31.000','2019-08-23 01:21:31.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_free','Node Memory Free (GiB)','Memory information field MemFree_bytes / 1073741824','node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_cached','Node Swap Memory Cached (GiB)','Memory information field SwapCached_bytes / 
1073741824','node_memory_SwapCached_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Cached Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_active_size','Active Memory (MiB)','The total amount of active memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_active_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Active Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_up','MySQL Up Count','Whether the last scrape of metrics from MySQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_up)','Instance','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Up counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_up','Oracle DB Up Count','Whether the Oracle database server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_up)','Instance','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle DB Up Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_process_count','Process Count','Gauge metric with count of processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_process_count)','Process','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Process Count Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_locks_count','Number of Locks','Number of locks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, mode) (pg_locks_count)','Lock','PostgreSQL','datname,mode',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Lock Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_updated','Number of Rows Updated','Number of rows updated by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_updated[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Updated Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_deleted','Number of Rows Deleted','Number of rows deleted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_tup_deleted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Deleted Row counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_files','Number of Temporary Files Created','Number of temporary files created by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_files[1m]))','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load15','Node CPU Load 15m Average','Node CPU 15m load average','node_load15{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 15m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:27:39.000','2019-05-15 08:27:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_throttling','Node CPU Throttling','Number of times this cpu package has been throttled.','increase(node_cpu_package_throttles_total{xm_entity_type=''Node'',{filter}}[1m])','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Throttling Counts:{{humanize $value}}|{threshold}.','2019-05-15 08:29:24.000','2019-05-15 08:29:24.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_usage','Pod CPU Usage (%)','Pod CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_system','Pod CPU System (%)','Pod CPU Usage (System)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage_bytes','Pod Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Used Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_limit_bytes','Pod Filesystem Limit Bytes (GiB)','Number of 
bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Limit Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load5','Node CPU Load 5m Average','Node CPU 5m load average','node_load5{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 5m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:26:07.000','2019-05-15 08:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_client_connectednativeclients','Number of Client Connections','Number of clients connected to this nodes native protocol server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_client_connectednativeclients)','Connection','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-07 11:59:04.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_activetasks','Number of tasks being actively worked on','Number of tasks being actively worked on','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_activetasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize 
$value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cloudwatch_requests_count','API requests made to CloudWatch','API requests made to CloudWatch','sum by (xm_clst_id, namespace, action) (rate(cloudwatch_requests_total{{filter}}[10m]))','Request','AWS/Usage',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.namespace}} CloudWatch API Call Volume:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_out','Bytes Sent Out on All Network Interfaces (KiB)','The number of bytes sent out on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_out_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_in','Bytes Received on All Network Interfaces (KiB)','The number of bytes received on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_in_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_count','Namespace Pod Count','Pod count by namespace','count (sum (container_last_seen{{filter}}) by (xm_clst_id, xm_namespace, xm_pod_id)) by (xm_clst_id, xm_namespace)','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Pod Counts:{{humanize $value}}|{threshold}.','2019-08-22 16:53:32.000','2019-08-23 01:06:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage','Node Filesystem Usage (%)','NODE Filesystem Usage','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_available','Node Memory Available (GiB)','Memory information field MemAvailable_bytes / 1073741824','node_memory_MemAvailable_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Avail Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_total','Node Memory Total (GiB)','Memory information field MemTotal_bytes 
/ 1073741824','node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive','Node Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:07:46.000','2019-05-31 17:45:22.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit','Node Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:09:05.000','2019-05-31 17:46:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_allocated_count','Cluster Pod Allocated Count','Cluster Pod Allocated Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_allocatable_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Allocated Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_desired_count','Cluster Pod Desired Count','Cluster pod desired count by controller','sum by (xm_clst_id) (imxc_kubernetes_controller_replicas{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Desired Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 02:26:55.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_commands_total','Number of Commands Executed','The number of times each XXX command has been executed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, command) (rate(mysql_global_status_commands_total[1m]) > 0)','Request','MySQL','command',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Commands Executed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-12 08:20:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_running','Number of Threads Running','The number of threads that are not sleeping','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_running)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_dbname_state','Count by dbname and state in pg','count by dbname and state in pg','sum by (xm_clst_id, xm_namespace, 
xm_node_id, instance, state) (pg_stat_activity_count)','Connection','PostgreSQL','state',true,false,'count by dbname and state in pg','2020-01-30 06:10:54.000','2020-01-31 11:33:41.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_alerts_received_count','Namespace alerts received count','Alert count by namespace','sum by (xm_clst_id, xm_namespace, level) (floor(increase(imxc_alerts_received_count_total{status=''firing'', {filter}}[10m])))','Alert','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Alert Count:{{humanize $value}}|{threshold}.','2019-08-23 04:43:29.000','2019-08-23 04:43:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_reads_count_device','Node Disk Reads Count per Device (IOPS)','Node Disk Reads Count per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_reads_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Reads Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency','Node Disk Read Latency (ms)','Node Disk Read Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-05-20 10:59:07.000','2019-05-31 17:46:54.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency_device','Node Disk Write Latency per Device (ms)','Node Disk Write Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes','Node Disk Write Bytes (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size_device','Node Filesystem Available Size per Device (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size_device','Node Filesystem Free Size per Device (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size_device','Node Filesystem Total Size per Device (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_free','Node Swap Memory Free (GiB)','Memory information field SwapFree_bytes / 1073741824','node_memory_SwapFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_total','Node Swap Memory Total (GiB)','Memory information field SwapTotal_bytes / 1073741824','node_memory_SwapTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_up','PostgreSQL Up Count','Whether the last scrape of metrics from PostgreSQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (pg_up)','Instance','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Instance Count:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests','Number of Writes to Buffer Pool','The number of writes done to the InnoDB buffer pool','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_write_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Writes to Buffer Pool Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests','Number of Logical Read Requests','The number of logical 
read requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_read_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Logical Read Requests Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_read','Amount of Data Read','The amount of data read since the server was started (in bytes)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_data_read[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Read Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_os_log_written','Number of Bytes Written to Redo Log','The number of bytes written to the InnoDB redo log files','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_os_log_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Written to Redo Log Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_written','Amount of Data Written','The amount of data written so far, in bytes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(mysql_global_status_innodb_data_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Written Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pod','Container Memory Request/Limits vs Used by Pod','container_memory_sum_by_pod','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,true,false,'Container memory sum by pod (limit, request, used)','2020-07-22 21:44:33.000','2020-07-22 21:44:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_cache_hit_ratio','Buffer Cache Hit Ratio','Buffer Cache Hit Ratio','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ( +(1 - increase(mysql_global_status_innodb_buffer_pool_reads [1h]) / increase(mysql_global_status_innodb_buffer_pool_read_requests [1h])) * 100)','Block','MySQL',NULL,true,false,'.','2019-12-05 07:47:50.000','2019-12-13 01:17:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_cluster','Container CPU Request/Limits vs Used by Cluster','Container cpu sum by cluster (capacity, limit, request, usage)','sum by(xm_clst_id, data_type) ( 
+label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} *0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})*0.001, "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})*0.001, "data_type", "request", "" , "") or +label_replace(sum by(xm_clst_id)(rate(container_cpu_usage_seconds_total{{filter}}[1m])), "data_type", "used", "" , ""))','CPU','Cluster',NULL,true,false,'Container cpu sum by cluster','2020-07-22 17:49:53.000','2020-07-22 17:49:53.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size','Node Filesystem Total Size (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size','Node Filesystem Free Size (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('container_cpu_sum_by_pod','Container CPU Request/Limits vs Used by Pod','Container cpu sum by pod (capacity, limit, request, usage)','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type)( +label_replace (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "", "") or +label_replace (imxc_kubernetes_container_resource_limit_cpu{{filter}}*0.001, "data_type", "limit", "", "") or +label_replace (imxc_kubernetes_container_resource_request_cpu{{filter}}*0.001, "data_type", "request", "", "") +)','CPU','Pod',NULL,true,false,'Container cpu sum by Pod','2020-07-22 21:37:45.000','2020-07-22 21:37:45.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_lockmode','Count_by_lockmode','Count by lockmode','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, mode) (pg_locks_count)','Lock','PostgreSQL','mode',true,false,'Count by lockmode','2020-01-30 07:06:13.000','2020-01-30 07:06:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits','Number of Row Locks ','The number of row locks currently being waited for by operations on InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_innodb_row_lock_current_waits)','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_capacity','cluster_memory_capacity 
(Gib)','imxc_kubernetes_node_resource_capacity_memory','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Capacity:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:46:58.000','2020-05-27 09:05:56.427'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_free','Host system Filesystem free','Host File system free','sum by (instance) (node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Free Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:29.025','2020-03-23 04:08:29.025'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total','Host system Filesystem total','Host File system total','sum by (instance) (node_filesystem_size_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Total Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:27.634','2020-03-23 04:08:27.634'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_used','Host Swap Memory Used','Host Swap Used','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Used Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:26.169','2020-03-23 04:08:26.169'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes_device','Node Disk Read Bytes per Device (KiB)','The total number 
of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes','Node Disk Read Bytes (KiB)','The total number of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_rollback','Number of Transactions Rolled Back','Number of transactions in this database that have been rolled back','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_xact_rollback[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Rollback Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_commit','Number of Transactions Committed','Number of transactions in this database that have been committed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_xact_commit[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Commit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_ops_total','Number of Rows Operated','The number of rows operated in InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, operation) (rate(mysql_global_status_innodb_row_ops_total[1m]))','Row','MySQL','operation',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Rows Operated Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_immediate','Number of Table Lock Immediate','The number of times that a request for a table lock could be granted immediately','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_immediate[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Immediate Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_count','Local range scan count (count/s)','Local range scan count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) 
(rate(cassandra_keyspace_rangelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_waited','Number of Table Lock Waited','The number of times that a request for a table lock could not be granted immediately and a wait was needed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_waited[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Waited Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_time','Time Spent Reading Data File Blocks (ms)','Time spent reading data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blk_read_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_write_time','Time Spent Writing Data File Blocks (ms)','Time spent writing data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_blk_write_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Write Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_read','Number of Disk Blocks Read','Number of disk blocks read in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_read[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_hit','Number of Block Cache Hit','Number of times disk blocks were found already in the buffer cache','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_hit[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Hit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_activity_count','Number of Client Connections','number of connections in this state','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, state) (pg_stat_activity_count{{filter}})','Connection','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Connection Counts:{{humanize 
$value}}|{threshold}.','2019-08-27 15:49:21.000','2019-11-18 04:16:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_fetched','Number of Rows Fetched','Number of rows fetched by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_fetched[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Fetched Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_inserted','Number of Rows Inserted','Number of rows inserted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_inserted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Inserted Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_latency','Local range scan latency (ms)','Local range scan latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_rangelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_size','Size used by commit log segments (KiB/s)','Current size, in bytes, used by all the commit log segments / 1024','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_totalcommitlogsize[1m]){{filter}}) / 1024','Log','Cassandra',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Volume:{{humanize $value}}KiB/s|{threshold}KiB/s.','2019-10-02 10:17:01.000','2019-11-05 08:07:03.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_messages','Number of commit log messages written (count/s)','Total number of commit log messages written','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_completedtasks[1m]))','Log','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Message per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_count','Number of client requests (count/s)','Number of client requests by request type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_latency_seconds_count{{filter}}[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Client Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:04:25.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_active','Node Memory Active (GiB)','Memory information field Active_bytes in GiB','node_memory_Active_bytes{xm_entity_type=''Node'', {filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Active Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_returned','Number of Rows Returned','Number of rows returned by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_returned[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Returned Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_write_count','Local write count (count/s)','Local write count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_writelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_cluster','Container Memory Request/Limits vs Used by Cluster','Container memory sum by cluster','sum by (xm_clst_id, 
data_type)( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity", "" , "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "", "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "", "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Cluster',NULL,true,false,'Container memory sum by cluster','2020-07-22 21:23:15.000','2020-07-22 21:23:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_capacity','node_memory_capacity (Gib)','node memory capacity in GiB','imxc_kubernetes_node_resource_capacity_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:46:58.000','2019-08-23 08:46:58.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_request_cores','cluster_cpu_request_cores','cluster_cpu_request_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_request_cores','node_cpu_request_cores','node_cpu_request_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_limit_cores','cluster_cpu_limit_cores','cluster_cpu_limit_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_limit_cores','node_cpu_limit_cores','node_cpu_limit_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_unavailables_count','Number of unavailable exceptions encountered','Number of unavailable exceptions encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_unavailables_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Unavailable Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_up','Cassandra Up Count','Whether the last scrape of metrics from Cassandra was able to connect to the server','count by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_bufferpool_size{{filter}})','Instance','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Instances:{{humanize 
$value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 17:01:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_up','MongoDB Up Count','The number of seconds that the current MongoDB process has been active','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_instance_uptime_seconds[1m]))','Instance','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Up Count Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_current_queue','Number of Operations Waiting','The number of operations that are currently queued and waiting for the read or write lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_current_queue)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Waiting Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_client','Number of Active Client','The number of the active client connections performing read or write operations','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_client)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Active Client Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_metrics_document_total','Number of Documents Processed','The total number of documents processed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_metrics_document_total[1m]))','Row','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Documents Processed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused','Total disk space used (GiB)','Total disk space used belonging to this keyspace / 1073741824','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_totaldiskspaceused {{filter}}) / 1073741824','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Disk Space:{{humanize $value}}GiB|{threshold}GiB.','2019-10-02 10:17:01.000','2019-11-07 01:14:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_latency','Local read latency (ms)','Local read latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_totalblockedtasks','Number of tasks that were blocked (count/s)','Number of tasks that were blocked due to queue saturation in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_totalblockedtasks_count[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Blocked Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_completedtasks','Number of tasks completed (count/s)','Number of tasks completed in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_completedtasks{{filter}}[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Pending Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-05 08:08:57.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_memory','Amount of Memory, in MebiByte','The amount of memory, in mebibyte (MiB), currently used by the database process','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_memory)','Memory','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Memory:{{humanize $value}}MiB|{threshold}MiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_resource_utilization','Resource Usage','Gauge metric with resource utilization','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) (oracledb_resource_current_utilization)','Resource','OracleDB','resource_name',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Resource Usage:{{humanize $value}}%|{threshold}%.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_timeouts_count','Number of timeouts encountered','Number of timeouts encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_timeouts_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Timeout Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_network_bytes_total','Amount of Network Traffic','The number of bytes that reflects the amount of network traffic','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_network_bytes_total[1m]))','Network','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Network Traffic Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_op_counters_total','Number 
of Operations','The total number of operations since the mongod instance last started','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (rate(mongodb_op_counters_total[1m]))','Request','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits','Number of Waits for Row Locks','The number of times operations on InnoDB tables had to wait for a row lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_row_lock_waits[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Waits for Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_execute_count','Execute Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_execute_count[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Execute Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_commits','User Commits','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(oracledb_activity_user_commits[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_parse_count','Parse Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_parse_count_total[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Parse Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_rollbacks','User Rollbacks','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_user_rollbacks[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Rollback:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_writes','Pod Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 
05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage','Pod Memory Usage (%)','Pod Memory Usage Compared to Limit','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024)','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Utilization:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage_bytes','Pod Memory Used (GiB)','Current memory usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_cache_hit_ratio','Buffer Cache Hit Ratio (%)','Number of Block Cache Hit / (Number of Block Cache Hit & Blocks Reads) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (increase(pg_stat_database_blks_hit[1h]) / (increase(pg_stat_database_blks_read[1h]) + increase(pg_stat_database_blks_hit[1h])) * 100)','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL
Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-08-27 15:49:21.000','2019-12-13 01:33:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_other','Wait-Time - Other','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_other[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Other:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_configuration','Wait-Time - Configuration','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_configuration[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Configuration:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_commit','Wait-Time - Commit','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_commit[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_scheduler','Wait-Time - Scheduler','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_scheduler[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Scheduler:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_system_io','Wait-Time - System I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_system_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - System I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_user_io','Wait-Time - User I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_user_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - User I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_network','Wait-Time - Network','Generic counter metric from 
v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_network[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Network:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_blocked_clients','Blocked Clients','Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_blocked_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Blocked Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connected_clients','Connected Clients','Number of client connections (excluding connections from replicas)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_connected_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Connected Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connections_received','Received Connections','Total number of connections accepted by the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_connections_received_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Received Connections:{{humanize 
$value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_rejected_connections','Rejected Connections','Number of connections rejected because of maxclients limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_rejected_connections_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Rejected Connections:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_up','Redis Up Count','Whether the Redis server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_up)','Instance','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Up Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_total','Call Count / Command','Total number of calls per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_total[1m]))','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Call Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_processed','Processed Commands','Total number of commands processed by the server','sum by (xm_clst_id, 
xm_namespace, xm_node_id, instance) (rate(redis_commands_processed_total[1m]))','Request','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Processed Commands:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_key_hit_raito','Redis key hit ratio','redis key hit ratio','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_keyspace_hits_total [1m]), "data_type", "hits", "" , "") or +label_replace(rate(redis_keyspace_misses_total [1m]), "data_type", "misses", "" , "") )','Keyspace','Redis','data_type',true,false,'redis key hit ratio','2020-01-29 02:28:03.000','2020-02-13 00:46:27.568'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_net_byte_total','Network byte','Network byte','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_net_input_bytes_total [1m]), "data_type", "input", "", "") or +label_replace(rate(redis_net_output_bytes_total [1m]), "data_type", "output", "", ""))','Network','PostgreSQL','data_type',true,false,'Network byte','2020-01-30 07:22:12.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_cache','Pod Memory Cache (GiB)','Number of bytes of page cache memory / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_cache{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Cache Memory:{{humanize
$value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_swap','Pod Memory Swap (GiB)','Pod swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_swap{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_total','Oracledb wait time total','oracledb wait time total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_wait_time_scheduler[1m]), "data_type", "scheduler", "", "") or +label_replace(rate(oracledb_wait_time_commit[1m]), "data_type", "commit", "", "") or +label_replace(rate(oracledb_wait_time_network[1m]), "data_type", "network", "", "") or +label_replace(rate(oracledb_wait_time_concurrency[1m]), "data_type", "concurrency", "", "") or +label_replace(rate(oracledb_wait_time_configuration[1m]), "data_type", "configuration", "", "") or +label_replace(rate(oracledb_wait_time_user_io[1m]), "data_type", "user_io", "", "") or +label_replace(rate(oracledb_wait_time_system_io[1m]), "data_type", "system_io", "", "") or +label_replace(rate(oracledb_wait_time_other[1m]), "data_type", "other", "", ""))','Wait','OracleDB','data_type',true,false,'oracledb wait time total','2020-01-29 11:03:20.000','2020-02-13 01:08:01.629'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('oracledb_activity_count','Oracledb activity count','oracledb activity count','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_execute_count [1m]), "data_type", "excutecount", "", "") or +label_replace(rate(oracledb_activity_parse_count_total[1m]), "data_type", "parse_count", "", "") )','Request','OracleDB','data_type',true,false,'oracledb activity count','2020-01-29 10:40:58.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_transaction','Oracledb transaction','oracledb transaction','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_user_rollbacks[1m]), "data_type", "rollbacks", "", "") or +label_replace(rate(oracledb_activity_user_commits[1m]), "data_type", "commits", "", ""))','Request','OracleDB','data_type',true,false,'oracledb transaction','2020-01-29 11:20:47.000','2020-02-13 01:26:28.558'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_cpu_usage','Redis cpu usage','redis cpu usage','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_used_cpu_sys [1m]), "data_type", "system", "", "") or +label_replace(rate(redis_used_cpu_user [1m]), "data_type", "user", "", "") )','CPU','Redis','data_type',true,false,'redis cpu usage','2020-01-29 01:56:58.000','2020-02-12 04:47:21.228'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_total_load','host total load','host total load','sum by (instance, data_type) ( +label_replace(node_load1 {{filter}}, "data_type", "load 1", "", "") or +label_replace(node_load5 
{{filter}}, "data_type", "load 15", "", "") or +label_replace(node_load15 {{filter}}, "data_type", "load 15", "", "") )','CPU','Host',NULL,false,false,'host total load','2020-04-01 08:10:26.588','2020-04-03 01:23:47.665'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys_children','System CPU Used Background','System CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_hits','Keyspace Hits','Number of successful lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_hits_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Hits:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_misses','Keyspace Misses','Number of failed lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_misses_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Misses:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys','DB Keys Count','Total number of keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB Keys Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_expired_keys','Expired Keys','Total number of key expiration events','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_expired_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Expired Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_evicted_keys','Evicted Keys','Number of evicted keys due to maxmemory limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_evicted_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Evicted Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys_expiring','DB Keys Count Expiring','Total number of expiring keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys_expiring)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB 
Keys Count Expiring:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_duration_seconds','Duration Seconds / Command','Total duration seconds per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_duration_seconds_total[1m]) * 1000)','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Duration Seconds:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-29 01:42:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_total','Redis memory total','redis memory total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(redis_allocator_active_bytes / 1048576, "data_type", "active", "" , "") or +label_replace(redis_memory_used_bytes / 1048576, "data_type", "used", "" , "") or +label_replace(redis_allocator_allocated_bytes / 1048576, "data_type", "allocated", "" , "") or +label_replace(redis_allocator_resident_bytes / 1048576, "data_type", "resident", "" , "") )','Memory','Redis','data_type',true,false,'redis memory total','2020-01-29 02:08:28.000','2020-02-13 00:45:28.475'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('count_by_connection_type','Count by connection type','count by connection type','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_connections_received_total [1m]), "data_type", "received connections", "", "") or +label_replace(rate(redis_rejected_connections_total [1m]), "data_type", "rejected 
connections", "", "") or +label_replace(redis_connected_clients, "data_type", "connected clients", "", "") or +label_replace(redis_blocked_clients, "data_type", "blocked clients", "", "") )','Connection','Redis','data_type',true,false,'count by connection type','2020-01-29 00:49:09.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_count','Number of row by stat','Number of row by stat','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_tup_deleted[1m]), "data_type", "deleted", "", "") or +label_replace(rate(pg_stat_database_tup_updated[1m]), "data_type", "updated", "", "") or +label_replace(rate(pg_stat_database_tup_inserted[1m]), "data_type", "inserted", "", "") or +label_replace(rate(pg_stat_database_tup_returned[1m]), "data_type", "returned", "", "") or +label_replace(rate(pg_stat_database_tup_fetched[1m]), "data_type", "fetched", "", "") )','Row','PostgreSQL','data_type',true,true,'Number of row by stat','2019-10-28 07:29:26.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_write_time','Read/Write spent time by file blocks','Read/Write spent time by file blocks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_blk_read_time [1m]), "data_type", "read", "", "") or +label_replace(rate(pg_stat_database_blk_write_time [1m]), "data_type", "write", "", ""))','Block','PostgreSQL','data_type',true,false,'Read/Write spent time by file blocks','2019-10-28 10:56:48.000','2020-02-13 01:06:46.680'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_resident_size','Resident Memory (MiB)','The total amount of resident memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_resident_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Resident Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_used_size','Used Memory (MiB)','Total number of bytes allocated by Redis using its allocator','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_memory_used_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Used Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_anormal_count','Number of anormal request','Number of anormal request ','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, anormal_type) +(label_replace(rate(cassandra_clientrequest_unavailables_count[1m]), "anormal_type", "unavailables", "", "") or +label_replace(rate(cassandra_clientrequest_timeouts_count[1m]), "anormal_type", "timeouts", "", "") or +label_replace(rate(cassandra_clientrequest_failures_count[1m]), "anormal_type", "failures", "", ""))','Request','Cassandra','anormal_type',true,false,'Number of anormal request ','2019-10-28 02:09:45.000','2020-02-13 01:16:24.862'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog','Commitlog count and size','Commitlog count and size','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(rate(cassandra_commitlog_completedtasks {{filter}}[1m]), "data_type", "log_count", "", "") or +label_replace(rate(cassandra_commitlog_totalcommitlogsize {{filter}}[1m]) / 1048576, "data_type", "log_size", "", ""))','Log','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-24 10:44:47.000','2020-02-13 01:16:24.864'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_threads_total','Number of Threads','Number of Threads','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_threads_running, "data_type", "active", "", "") or +label_replace(mysql_global_status_threads_connected, "data_type", "connected", "", "") or +label_replace(rate(mysql_global_status_connections [1m]), "data_type", "connection attempts[1m]", "", "") )','Thread','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-05 06:04:21.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_read_write_count','Local read write count','Local read write count','sum by(xm_clst_id, xm_namespace, xm_node_id, instance, type) +(label_replace( rate(cassandra_keyspace_readlatency_seconds_count [1m]), "type", "read", "", "") or +label_replace( 
rate(cassandra_keyspace_writelatency_seconds_count [1m]), "type", "write", "", ""))','Disk','Cassandra','type',true,true,'Local read write count','2019-10-24 05:18:50.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_lock_total','Oracledb lock total','oracledb lock total','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) +(oracledb_resource_current_utilization{resource_name =~''.+_locks''})','Resource','OracleDB','resource_name',true,false,'oracledb lock total','2020-01-29 11:17:01.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_per_sec_by_api','Service HTTP Requests Count by API (per Second)','the number of HTTP requests counts per second by API','(sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value)','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_core_count','Host CPU Core Count','Host_cpu_capacity_cores','count without(cpu, mode) (node_cpu_seconds_total{{filter}})','CPU','Host',NULL,true,false,'None','2020-03-23 
04:08:05.290','2020-03-23 04:08:05.290'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load5','Host CPU Load 5m Average','Host CPU 5m load average','node_load5{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 5m Load Average:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:11.655','2020-03-23 04:08:11.655'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_cluster','Pod Phase Count by Cluster','pod phase count by cluster','count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))','Cluster','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_network_io_byte','host network io byte','host network io byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )','Network','Host',NULL,false,false,'host network io byte','2020-03-24 05:48:31.359','2020-03-24 05:48:31.359'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_contextswitch_and_filedescriptor','host contextswitch and filedescriptor','host 
contextswitch and filedescriptor','sum by (data_type, instance) ( +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "Context switch", "", "") or +label_replace(node_filefd_allocated {{filter}}, "data_type", "File descriptor", "", "") )','OS','Host',NULL,false,false,'host contextswitch and filedescriptor','2020-03-24 09:05:51.828','2020-03-24 09:08:06.867'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_usage','Host Swap Memory Usage (%)','Host Swap Memory Usage','(node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}) / node_memory_SwapTotal_bytes{{filter}} * 100 +','Memory','Host',NULL,true,false,'None','2020-03-26 06:39:21.333','2020-03-26 06:39:21.333'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_boot_time','Host Boot time','Host Boot time','node_boot_time_seconds{{filter}}','CPU','Host',NULL,true,false,'None','2020-03-26 08:03:46.189','2020-03-26 08:03:46.189'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_latency','Host read Disk latency','Host disk read latency','sum by (instance) (rate(node_disk_reads_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_read_time_seconds_total{{filter}}[1m])/rate(node_disk_reads_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Read Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:34.001','2020-03-23 04:08:34.001'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('host_disk_write_latency','Host write Disk latency','Host disk write latency','sum by (instance) (rate(node_disk_writes_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_write_time_seconds_total{{filter}}[1m])/rate(node_disk_writes_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Write Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:35.823','2020-03-23 04:08:35.823'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_usage','Host Memory Usage (%)','Host Memory Usage ','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Usage:{{humanize $value}}%|{threshold}%.','2020-03-26 06:36:47.931','2020-03-26 06:36:47.931'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_total','Host Memory Total (GiB)','Memory information field MemTotal_bytes','node_memory_MemTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:16.897','2020-03-23 04:08:16.897'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_bytes_received_sent','Bytes Received & Sent in MySQL','Bytes Received & Sent in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( 
+label_replace(rate(mysql_global_status_bytes_received [1m]), "data_type", "received", "", "") or +label_replace(rate(mysql_global_status_bytes_sent [1m]), "data_type", "sent", "", ""))','Network','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}|{threshold}.','2019-12-05 07:58:11.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_95th','Service HTTP 95% Elapsed Time (ms)','the maximum time taken to servce the 95% of HTTP requests','histogram_quantile(0.95, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 95th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_99th','Service HTTP 99% Elapsed Time (ms)','the maximum time taken to servce the 99% of HTTP requests','histogram_quantile(0.99, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 99th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_error_rate','Service Pod HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Pod Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-11-07 07:52:24.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_90th','Service HTTP 90% Elapsed Time (ms)','the maximum time taken to servce the 90% of HTTP requests','histogram_quantile(0.90, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 90th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total_by_mountpoint','host filesystem size by mountpoint','host filesystem size by mountpoint','sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))','Filesystem','Host',NULL,false,false,'host filesystem size by mountpoint','2020-03-30 04:01:45.322','2020-03-30 05:16:32.252'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_timeline_count','Namespace timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id, xm_namespace, level)','Timeline','Namespace',NULL,false,false,'None','2020-04-08 06:21:21.392','2020-04-08 06:21:21.392'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_timeline_count','Cluster timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id,level)','Timeline','Cluster',NULL,false,false,'None','2020-04-08 06:19:32.792','2020-04-28 
08:07:47.786'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_transmit','Cluster Network Transmit','Cluster Network Transmit','sum by (xm_clst_id) (rate(node_network_transmit_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Transmit','2020-04-28 08:10:21.070','2020-04-28 08:29:18.491'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_receive','Cluster Network Receive','Cluster Network Receive','sum by (xm_clst_id) (rate(node_network_receive_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Receive','2020-04-28 08:07:26.294','2020-04-28 08:29:18.486'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_running_count','Namespace Pod Running Count','Running pod count by namespace','count by (xm_clst_id, xm_namespace) (sum by (xm_clst_id, xm_node_id, xm_namespace, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Namespace',NULL,false,false,'None','2020-05-21 01:18:06.016','2020-05-21 01:18:06.016'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_request','Pod CPU Request','Pod CPU Request','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_io_byte','Node Network IO byte','Node Network IO byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", "") )','Network','Node',NULL,false,false,'Node Network IO byte','2020-05-21 07:32:03.535','2020-05-21 07:32:03.535'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_request','pod_memory_request (Gib)','Total container memory request in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_node','Container memory sum by node','Container memory sum by node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity" , "", "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_working_set_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Node',NULL,false,false,'Container memory sum by node','2020-05-28 09:36:44.000','2020-06-09 01:38:10.694'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_context_switches','Node Context Switches','Node Context Switches','rate(node_context_switches_total {{filter}}[1m])','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:05.521'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_write_byte','Node disk read and write bytes','Node disk read and write bytes','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]), "data_type", "Read" , "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]), "data_type", "Write", "" , "") +)','Disk','Node',NULL,false,false,'Node disk read and write bytes','2020-05-28 13:02:44.729','2020-05-28 13:04:35.126'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_total','Host Swap Memory Total','Host Swap Total','node_memory_SwapTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Swap Memory Size:{{humanize 
$value}}GiB|{threshold}GiB.','2020-03-23 04:08:23.130','2020-03-23 04:08:23.130'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_iowait','Host CPU iowait','Host CPU iowait','avg by (instance) (rate(node_cpu_seconds_total{mode=''iowait'',{filter}}[1m])) * 100','CPU','Host',NULL,false,false,'Host:{{$labels.instance}} CPU IO wait:{{humanize $value}}|{threshold}.','2020-03-26 08:03:51.307','2020-03-26 08:03:51.307'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_filefd_allocated','Host statistics Filesystem allocated.','Host File descriptor statistics: allocated.','sum by (instance) (node_filefd_allocated{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem allocated:{{humanize $value}}|{threshold}.','2020-03-23 04:08:31.970','2020-03-23 04:08:31.970'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg','Service HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests','sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) == 0 or +sum (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) +/ sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace)','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Requests Time Avg:{{humanize 
$value}}ms|{threshold}ms.','2019-10-15 09:37:44.000','2020-03-09 06:42:14.172'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate_by_api','Service HTTP Requests Error Rate by API','the number of HTTP error counts by API / the number of HTTP requests counts by API','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) ==0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg_by_api','Service HTTP Average Elapsed Time by API (ms)','the average time taken to serve the HTTP requests by API for a service','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.500'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_used','Node CPU Used (Cores)','Node CPU Used (Cores)','(100 - (avg by (xm_clst_id, xm_node_id) (clamp_max(rate(node_cpu_seconds_total{name="node-exporter", mode="idle", xm_entity_type="Node", {filter}}[1m]),1.0)) * 100)) * sum by(xm_clst_id, xm_node_id)(imxc_kubernetes_node_resource_capacity_cpu{{filter}}) / 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:35.939'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_iowait','Node CPU I/O Wait','Node CPU I/O Wait','avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{name="node-exporter", mode="iowait", xm_entity_type="Node" , {filter}}[1m])) * 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:20.633'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_node','Container cpu sum by Node','Container cpu sum by Node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} * 0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001), "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001), "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Node',NULL,false,false,'Container cpu sum by Node','2020-05-28 08:06:35.736','2020-06-09 01:46:12.446'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops_per_device','Node Disk IOPs per device','Node Disk I/O Operations Per Second (per device)','sum by (xm_clst_id, xm_node_id, device) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node','device',false,false,'None','2020-06-10 05:56:05.311','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops','Node Disk IOPs','Node Disk I/O Operations Per Second','sum by (xm_clst_id, xm_node_id) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-06-10 05:54:01.309','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_iops','Host Disk IOPs','Host Disk IOPs','sum by (instance) ((rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or (rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))','Disk','Node',NULL,false,false,'Host Disk IOPs','2020-06-10 07:26:28.895','2020-06-10 07:26:28.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_limit','Pod CPU Limit','Pod CPU Limit','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_limit','pod_memory_limit (Gib)','Total container memory limit in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage_bytes','Container Memory Used (GiB)','Current memory usage in GiB, this includes all memory regardless of when it was accessed','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_used','Node Memory Used (GIB)','Node Memory Used (GIB)','((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Used:{{humanize $value}}GiB|{threshold}GiB.','2020-05-21 01:18:06.000','2020-06-04 11:11:11.000'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user','User CPU Used','User CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user[1m]))','CPU','Redis',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-05-29 09:37:22.273'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_container','Container cpu sum by container','container cpu sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_request_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{xm_cont_name!=''POD'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_pods','Container cpu sum by pod','Container cpu sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , 
""))','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pods','Container memory sum by pod','Container memory sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_container','Container memory sum by container','Container memory sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_limit_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{xm_cont_name!=''POD'',{filter}}, "data_type", "used", "" , ""))','Memory','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_disk_read_write_byte','Container disk read and write bytes','Container disk read and write bytes','sum by(xm_clst_id, xm_pod_id, xm_cont_name, data_type) 
(label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_disk_read_write_byte','Pod disk read and write bytes','Pod disk read and write bytes','sum by(xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_io_byte','Container Network IO byte','Container Network IO byte','sum by (xm_clst_id, xm_pod_id, xm_cont_name, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_io_byte','Pod Network IO byte','Pod Network IO byte','sum by (xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", 
"", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load1','Node CPU Load 1m Average','Node CPU 1m load average','node_load1{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 1m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:22:49.000','2019-05-15 08:22:49.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_open_file_descriptor','Node File Descriptor','Node File Descriptor','sum by(xm_clst_id, xm_node_id)(node_filefd_allocated {{filter}})','Filesystem','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} File Descriptor:{{humanize $value}}|{threshold}.','2020-05-21 01:18:06.000','2020-05-29 09:37:51.101'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_node_count','Node Type Sparselog Count','Node-type sparse log count by xm_clst_id, xm_node_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_node_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Node",{filter}}[1m])))','SparseLog','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_cache','Container Memory Cache (GiB)','Number of bytes of page cache memory / 
1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_cache{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load15','Host CPU Load 15m Average','Host CPU 15m load average','node_load15{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 15m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:13.337','2020-03-23 04:08:13.337'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes_device','Node Disk Write Bytes per Device (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency','Node Disk Write Latency (ms)','Node Disk Write Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Latency:{{humanize 
$value}}ms|{threshold}ms.','2019-05-20 11:00:56.000','2019-05-31 17:47:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_writes_count_device','Node Disk Writes Count per Device (IOPS)','Node Disk Writes Counts per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_writes_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Writes Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_throttled_rate','Container CPU Throttled Rate','container throttled rate','sum by(xm_clst_id, xm_cont_id) (rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="",{filter}}[1m]))','Cluster','Container',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_total_count','Node Pod Total Count','Node Pod Total Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_service_http_requests_per_sec','Service HTTP Requests Count (per Second)','the number of HTTP requests counts per second','((sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))/ on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Http Requests/Second:{{humanize $value}}|{threshold}.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_per_sec','Service Pod HTTP Requests Count (per Second)','the number of HTTP requets counts per second for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod Http Requests/Seconds:{{humanize $value}}|{threshold}.','2019-11-07 07:51:11.000','2020-03-09 06:34:19.353'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_max_usage_bytes','Container Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_max_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 
1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_receive','Container Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_50th','Service HTTP 50% Elapsed Time (ms)','the maximum time taken to servce the 50% of HTTP requests','histogram_quantile(0.50, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 50th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_errors_count','Service Error Count','service error 
count','sum by(xm_clst_id, xm_namespace, xm_service_name, statuscode ) (imxc_service_errors_count{statuscode!="200",{filter}}) OR on() vector(0)','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Error Count:{{humanize $value}}|{threshold}.','2020-08-21 16:45:00.000','2020-08-21 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_used','Host Memory Used (GiB)','Memory information field MemUsed_bytes','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:21.399','2020-03-23 04:08:21.399'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_all_state','Workload Count All State','workload total count regardless of pod state','count by(xm_clst_id, controller_kind) (imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_running_pod','Workload Count Running Pod','workload count of Running state pod','sum by(xm_clst_id,controller_kind ) 
(imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit_device','Node Network Transmit per Device(KiB)','Network device statistic transmit_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive_device','Node Network Receive per Device(KiB)','Network device statistic receive_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_time_avg','Service Pod HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests for pod','sum by 
(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod http Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2019-11-07 07:51:46.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_system','Container CPU System (%)','Container CPU Usage (System)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_usage','Container CPU Usage (%)','Container CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-05-15 
01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_namespace','Pod Phase Count by Namespace','pod phase count by cluster, namespace','count by(xm_clst_id, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','Namespace','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} Pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_limit_bytes','Container Filesystem Limit Bytes (GiB)','Number of bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage','Container Memory Usage (%)','Container memory usage compared to limit if limit is non-zero or 1GiB if limit is zero','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'', xm_cont_name!=''POD'', {filter}} / (container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} > 0) * 100) or sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) 
(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024 / 1024 / 1024 * 100)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_swap','Container Memory Swap (GiB)','Container swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_swap{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_transmit','Container Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('controller_pod_count','Controller Pod Count','Controller Pod Count','sum (imxc_kubernetes_controller_counts{{filter}}) by (xm_clst_id, xm_namespace, xm_entity_name, 
xm_entity_type)','Pod','Controller',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Controller Pod Counts:{{humanize $value}}|{threshold}.','2019-10-10 06:39:09.000','2019-10-10 06:39:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load1','Host CPU Load 1m Average','Host CPU 1m load average','node_load1{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 1m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:09.946','2020-03-23 04:08:09.946'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_usage','Host CPU Usage (%)','Host CPU Usage','100 - (avg by (instance)(clamp_max(rate(node_cpu_seconds_total{mode=''idle'',{filter}}[1m]),1.0)) * 100)','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU Utilization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:07.606','2020-03-23 04:08:07.606'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_cpuutilization','The percentage of allocated EC2 compute','The percentage of allocated EC2 compute units that are currently in use on the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_cpuutilization_average{{filter}})','CPU','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} CPU Utilization:{{humanize $value}}%|{threshold}%','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections','Number of Incoming Connections','The number of incoming 
connections from clients to the database server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (mongodb_connections{{filter}})','Connection','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Incoming Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-13 02:26:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_buffer_io','Block read / write','mysql buffer I/O summary','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_innodb_buffer_pool_write_requests, "data_type", "write", "", "") or +label_replace(mysql_global_status_innodb_buffer_pool_read_requests, "data_type", "read", "", "") )','Block','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Mysql Buffer IO:{{humanize $value}}|{threshold}.','2019-12-05 07:30:33.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_reads','Number of Reads Directly from Disk','The number of logical reads that InnoDB could not satisfy from the buffer pool, and had to read directly from disk','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_reads[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Reads Directly from Disk Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('mysql_global_status_connections','Number of Connection Attempts','The number of connection attempts (successful or not) to the MySQL server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_connections[1m]))','Connection','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Connection Attempts counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_status_locks','Number of Locks in MySQL','Number of Locks in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_row_lock_current_waits[1m]), "data_type", "rowlocks", "", "") or +label_replace(rate(mysql_global_status_innodb_row_lock_waits[1m]), "data_type", "waits for rowlocks", "", "") or +label_replace(rate(mysql_global_status_table_locks_immediate[1m]), "data_type", "tablelock immediate", "", "") or +label_replace(rate(mysql_global_status_table_locks_waited[1m]), "data_type", "tablelock waited", "", "") )','Lock','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Mysql Status Locks:{{humanize $value}}|{threshold}.','2019-12-05 08:39:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage_bytes','Container Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_writes','Container Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_sessions_value','Session Count','Gauge metric with count of sessions by status and type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, status_type) +(label_join(oracledb_sessions_value, "status_type", "-", "status", "type"))','Session','OracleDB','status_type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Session Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_bytes','Bytes Written to Temporary Files (KiB)','Total amount of data written to temporary files by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_bytes[1m])) / 1024','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File 
Write Size:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys','System CPU Used','System CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user_children','User CPU Used Background','User CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate','Service HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts','sum by(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / sum by +(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) 
(rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_cache_hit_ratio','Buffer Cache Hit Ratio (%)','(Number of Logical Read - Number of Reads Directly from Disk) / (Number of Logical Read) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ((increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) - increase(mysql_global_status_innodb_buffer_pool_reads[1m])) / increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) * 100)','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Buffer Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage','Pod Filesystem Usage (%)','Pod File System Usage: 100 * (Used Bytes / Limit Bytes)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} /((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_pod_cpu_request','Node Pod CPU Request','Node Pod CPU Request','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} Pod CPU Requests:{{humanize $value}}|{threshold}.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_cpu_usage','Node Pod CPU Usage (%)','Node Pod CPU Usage','sum by (xm_clst_id,xm_node_id) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod CPU Usage:{{humanize $value}}%|{threshold}%.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_usage_core','Container CPU Usage (Core)','Container CPU Usage (Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_system_core','Container CPU System (Core)','Container CPU Usage (System)(Core)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_user_core','Container CPU User (Core)','Container CPU Usage 
(User)(Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_service','pod info in service','pod info(state, node) in service','sum by (xm_clst_id, xm_namespace, xm_service_name,xm_node_id,node_status,xm_pod_id,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2020-12-22 16:05:00.000','2020-12-22 16:05:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_state','Service State Count Sum','service state sum by xm_service_name','sum by (xm_service_name,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2021-01-06 17:30:00.000','2021-01-06 17:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_workload_state','Workload State Count Sum','workload state sum by owner_name','count by (owner_name, pod_state) (imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_workload','Pod info by workload type','pod info(state, node) by workload type (do filter param)','count by (xm_clst_id, xm_namespace, owner_name, xm_node_id, node_status, xm_pod_id, pod_state) 
(imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_up_state','Node State metric','Node State metric for up, down check','imxc_kubernetes_node_ready{{filter}}','State','Node',NULL,true,false,'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Down {threshold}.','2020-02-02 14:30:00.000','2020-02-02 14:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_reads_by_workload', 
'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, 
xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100))', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name,xm_entity_type) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by (xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 
'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by(xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +--Number of Pods not running +INSERT INTO public.metric_meta2 VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()); +--Number of Containers not running +INSERT INTO public.metric_meta2 VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", 
{filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()); +-- Containers Restart count +INSERT INTO public.metric_meta2 VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()); + +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_per_sec','Service Transaction Count (per Second)','Service Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Transaction Count (per Second)','2021-11-15 16:11:19.606','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_elapsed_time_avg','Service Pod Transaction Elapsed Time (avg)','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Average Elapsed Time','2021-11-15 16:09:34.233','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_jspd_txn_error_rate','Service Transaction Error Rate','Service Transaction Error Rate','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2022-02-15 14:33:00.118000','2022-02-15 15:40:17.640000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_per_sec','Service Pod Transaction Count (per sec)','The number of transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-02-15 17:59:39.450000','2022-02-15 17:59:39.450000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_elapsed_time_avg','Service Average Elapsed Time','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))','Request','Service',null,true,true,'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2021-11-15 16:09:34.233000','2021-11-15 16:12:21.335000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_error_count','Service Transaction Error Count','Service Transaction Error Count','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])))','Request','Service',NULL,true,true,'Service Transaction Error Count','2021-11-15 16:10:31.352','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_error_rate','Service Pod Transaction Error Rate','The number of transaction error rate for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.','2022-02-15 18:08:58.180000','2022-02-15 18:08:58.180000'); + +INSERT INTO 
metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_active_txn_per_sec','Service Active Transaction Count (per Second)','Service Active Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:51:45.946','2022-03-11 15:51:45.946'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_active_txn_per_sec','Service Pod Active Transaction Count (per sec)','The number of active transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:53:29.252','2022-03-11 15:53:29.252'); + + +INSERT INTO public.license_key (id, license_key, set_time, in_used, tenant_id) VALUES (nextval('hibernate_sequence'), 
'A46CB0A0870B60DD0EF554F092FB8490C647C4ACCF17177EB0028FEF1B677A1DC86C08219D3D357E55E87B653A9D2F044F9095576ED493CE5D1E180E8843A04BCFE94E500F85491D408CFC7397B82F00063415F4CF8756545B6ED1A38F07F91A7B6D9381B7FC433A5086CDD2D748527ECB42835677199F23F7C8E33A66E8138182DDD76BE4925FA4B1DFD96FD5578FE80C75E0E20D76877BF6FD570265D8E69CAC34795B982CF8D811669894886567E4F5F62E28990953401374B548787E35374BFF201D5C9AD062B326E72F9B1D7791A610DA1BDF1D4F829819BC537E06C8D54F95FB04F2DAC456698F605DE3BBD72E472FC79658C806B188988B053E1E4D96FFFFFF0312983D630FAD5E9160650653074248047030124045265319328119048121312221292096178141356403289033057286071001044254168244430392446457353385472238471183338511051434316333006127241420429465082200161165099271484261287306170426201314452131350327249112310323036187433166345114324280269098441154231174135226128298344425341164290424093450115453299282209144110060155055496368233391148510223372355438125122460232315097083390283180026090507303464176016343147301028053052418046214169100404193398101492126437150008449359062078276386196105011194373118107003376243188284337378334352432479501211364186021040035210237120336302073022394079272002081397132067383497202300181309396185361017436058208454167203412219275329234043427354024133409339470296204490485256467335056F5B2CABD122B376DAEA67944E1CCE6867DF9EB6504C78F817DF9EB6504C78F81BF1E615E6EC6242C9667BD675FC5FA39C6672FE2068E5D1431C6CD04429D07655865E293C1F77ED7A0D33F5556DA6CD3A8EC2774DB04F797CE4A29B0312F75E585D51D7B4DD227EA6BD5278CB9233040E7DD2B30A6D5119959D5B7EAC826D3DA0537EFB5A034A6A1C91A619F4E168F46A455B594C91F058E1E22C7EA2957EED7533D069C335C95B4FA2B53E71A800343EA7F16B05AFBA04635F1FBDE9C81709C27BA075C78FA26311ED3A4A5226EF47FC84C3024999406B47F2098B5983CC3CAF79F92332074B9872E429CBE8EF12D5092628E4D4A39CBDDFCAAB2E382229CF09A5B10243340C1A7A0C5CBC14C704FCE873571524A5B038F1781CD31A4D8E2C48E02E63A2746E668273BE9D63937B88D8C864CE439528EB13BDFAC3E52EE4B8CB75B4ED65A7C97B42E5DAEE3E41D2331B06FFFBA71BECD9B96AEEB969670FC3869CC59050FD6DFA3245719531410402
2250232266247291151DEFAULT_TENANT', now(), true, 'DEFAULT_TENANT'); +insert into public.license_key2 (id, license_key, set_time, cluster_id, license_used) values (nextval('hibernate_sequence'), 'D041F44269EAFF1AF7C37ACAA86B7D9CBED89547431E777B797220CF62FE5D6A27C66BEBEAB8F4C89EA5379009C90CDEBFFAE307B7AEB897DC4D8CEAB61654340BB746B0B46679A9FB4791C777BAEBA176308F6BEB1654CE43D4E80E6D0F80CEC00B1EC30E7DA4BB8D3159133EF98AEB50617107DB77BE94676E0D4AA04ADA3B11A66824DB89A60C52BC1AB92926F10189DBBA6210B31478F48CF87B5D754F1A7C6BED0D1637742179DBF7BE82B3B3357AEA82CFAAD9126E39C4E19BABCB1CBDDB816C86A8F7C476D963265720383B627800775B0C9116D67CE5CB7CFC71D0A8A36623965EBB18A5BE1816FB1FAAAEAC361D2ABBC7344EC0B6C61E0395115B13FFFFFF03DEF34E840F2ED2AC84AC44DF368362366124308470063002498494067338303241077065122260378200508377102354337080160182150254091118451110391059070094162363290186239455351194330333503046082379128006166220287276298120398066372099177432015458270176242025196335311342039022343475412085392206244005184417460227292375103433217376511140361223163316121467443014486278407389237024349111268136424371062035285300509195050441367478101310353464249250399393211468032382017479033204215420319027225173414447170427346074048078201158299332476339297492269181214328291096331271222221199421106169418137405411466364104047152090465446480302462385088114481261428257207129020358100073347153355274495263056109229159157348228275180360410147142130230179450079472482323145202198010119F9BFDDF3C203A7E537AB046811BB7CEA37AB046811BB7CEA37AB046811BB7CEAE012403885A8163C0E3E14D7AD6207B5E8CE91579501D84B09D6682339A4DB462F479FFE1B232AFB3D19E925768AF0AA3E62D9AB6F9CEADDB1CDCA351CAA90996631814A556C47270431A6A40891F756FDDCA7BDD05C62A2932F8E77979E0D43C9F12565B1F4BB4F0520B44CC76BAC23F65330AC5966D22B209F32126132F4848E500A013F4DC32306A9620394D40C94B8EBC2406B68EBE31DAB17EF2DF977731A5C41C11311DC36E1FB8BC2529D1AA20D5D46919472212D781B1D77378872CBD14C2A5B783C7ADF0D2680946C52E56E186A7E971E7EAB2CF09511361DD892B5D4A113E8A2C60E3F7FEFA4100753D
82B7064101002937733CE0285C73130635F0CBBDF6F1160C2917B2DF9B1C391A8E9D7D9F380BF31A77A84017D0DF26B35BED6B2D145A051EB4345DA90241CA997828B8393ACD5C7316594634356CCC3986EFDD7776AC62C65E500ED125097142489479219130046503035CloudMOA', now(), null, true); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); + +INSERT INTO public.report_template(id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) VALUES(nextval('hibernate_sequence'), 'admin', '2020-04-28 09:29:49.466', 'admin', '2020-04-28 09:29:49.466', '0 0 1 ? * * *', true, +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network 
Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s Cluster resource usage is displayed.

1. CPU Usage

${metricItem1587977724113}

2. Memory Usage

${metricItem1588037028605}

3. Network

Receive

${metricItem1588059107546}

Transmit

${metricItem1588059110952}

2. Pod


1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}





', 'cloudmoa Cluster Daily Report'); +INSERT INTO public.report_template (id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) +VALUES(nextval('hibernate_sequence'), 'admin', '2020-01-20 01:17:50.182', 'admin', '2020-04-29 08:01:40.841', '0 0 9 ? * * *', false, +'[{"id":"metricItem1579497906163","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_cpu_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1579497916213","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_memory_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Memory Usage (%)","displayType":"bar","unit":"%","data":""},{"id":"metricItem1579497928963","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_network_receive","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Network Receive (KiB)","displayType":"pie","unit":"%","data":""},{"id":"metricItem1579497947243","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_load5","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Load 5m Average","displayType":"table","unit":"%","data":""}]', +'

1. editor usage

Let''s write the editor.

1.1 Text Decoration

Bold
Italic
Strike


1.2 Color and blockquote

What''s your color?

Today is the first day of the rest of your life

1.3 List

  • Apple
  • Banana

  1. postgre
  2. cassandra
  3. prometheus

[ TODO List ]
  • Create DB table
  • Change file name

1.4 Link, Table, Image




Daemonset NameAgeNamespaceLabelsImageCPUMemory
imxc-agent5
day
imxcimxc-agentregistry.openstacklocal:5000/imxc/imxc-agent:latest83.151.68
GiB
kube-flannel-ds-amd643
month
kube-systemflannelnodequay.io/coreos/flannel:v0.11.0-amd641.0790.88
MiB
kube-proxy10
month
kube-systemkube-proxyk8s.gcr.io/kube-proxy:v1.16.01.18117.66
MiB
node-exporter10
month
defaultnode-exporternode-exporterprom/node-exporter4.7697.54
MiB

exem.jpg

1.6 Metric Item

${metricItem1579497906163}
${metricItem1579497916213}
${metricItem1579497928963}
${metricItem1579497947243}



















', 'Editor usage example'); + +INSERT INTO public.report_static(id, created_by, created_date, modified_by, modified_date, cron_exp, metric_data, template_data, title, "type", report_template_id) VALUES(10582051, 'admin', '2020-04-29 08:27:52.545', 'admin', '2020-04-29 08:27:52.545', '0 0 1 ? * * *', +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s cluster resource usage flow is shown.

1. CPU Usage

Abnormally high CPU usage by particular programs can be an indication that there is something wrong with the computer system.

${metricItem1587977724113}

2. Memory Usage

The Memory Usage window displays the amount of memory available on your system, as well as the memory currently in use by all applications, including Windows itself.

${metricItem1588037028605}

3. Network

A network transmit/receive provides basic network utilization data in relation to the available network capacity.

Receive

${metricItem1588059107546}

Transmit

${metricItem1588059110952}

2. Pod

1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}







', +'cloudmoa Cluster Daily Report', 'manual', (select id from report_template where title='cloudmoa Cluster Daily Report')); + +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency 
(ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', (select id from auth_resource2 where name='CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service 
TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', (select id from auth_resource2 where name='Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', 
'[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', +(select id from auth_resource3 where 
name='dashboard|admin|CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', +(select id from auth_resource3 where name='dashboard|admin|Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, 
code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, 
code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'cloudmoa-trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'cmoa-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream 
Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +insert into public.log_management (cluster_id, node_id, log_rotate_dir, log_rotate_count, log_rotate_size, log_rotate_management, back_up_dir, back_up_period, back_up_dir_size, back_up_management, created_date, modified_date) values ('cloudmoa', '', '/var/lib/docker', 3, 100, true, '/home/moa/log', 5, 1000, true, '2020-07-30 13:54:52', null); + +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (5, 'metrics-server', 'agent', 'Metrcis-Server는 Kubernetes의 kubelet에 있는 cAdvisor로부터 Container Metric 데이터를 수집하여 Prometheus에 전달하는 역할을 합니다.', null, '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: 
["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + 
serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1', true, '2021-03-11 13:41:48.000000', '2021-03-11 13:41:56.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. +', null, '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', null, '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: 
+ cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', null, '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - 
extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + 
containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.16', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + 
metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: 
; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (3, 'prometheus', 
'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.15', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: 
__meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + 
restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: 
${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); + + +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"', true); + + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), 
param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the 
option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission 
cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/Chart.yaml new file mode 100644 index 0000000..a5d4032 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 4.0.0 +description: Modified Authentication Module By EXEM CloudMOA +home: https://www.keycloak.org/ +icon: https://www.keycloak.org/resources/images/keycloak_logo_480x108.png +keywords: +- sso +- idm +- openid connect +- saml +- kerberos +- ldap +maintainers: +- email: unguiculus@gmail.com + name: unguiculus +- email: thomas.darimont+github@gmail.com + name: thomasdarimont +name: keycloak +sources: +- https://github.com/codecentric/helm-charts +- https://github.com/jboss-dockerfiles/keycloak +- https://github.com/bitnami/charts/tree/master/bitnami/postgresql +version: 11.0.1 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/OWNERS b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/OWNERS new file mode 100644 index 0000000..8c2ff0d --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/OWNERS @@ -0,0 +1,6 @@ +approvers: + - unguiculus + - 
thomasdarimont +reviewers: + - unguiculus + - thomasdarimont diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/README.md b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/README.md new file mode 100644 index 0000000..5f8da10 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/README.md @@ -0,0 +1,765 @@ +# Keycloak + +[Keycloak](http://www.keycloak.org/) is an open source identity and access management for modern applications and services. + +## TL;DR; + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Introduction + +This chart bootstraps a [Keycloak](http://www.keycloak.org/) StatefulSet on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +It provisions a fully featured Keycloak installation. +For more information on Keycloak and its capabilities, see its [documentation](http://www.keycloak.org/documentation.html). + +## Prerequisites Details + +The chart has an optional dependency on the [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart. +By default, the PostgreSQL chart requires PV support on underlying infrastructure (may be disabled). + +## Installing the Chart + +To install the chart with the release name `keycloak`: + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Uninstalling the Chart + +To uninstall the `keycloak` deployment: + +```console +$ helm uninstall keycloak +``` + +## Configuration + +The following table lists the configurable parameters of the Keycloak chart and their default values. 
+ +| Parameter | Description | Default | +|---|---|---| +| `fullnameOverride` | Optionally override the fully qualified name | `""` | +| `nameOverride` | Optionally override the name | `""` | +| `replicas` | The number of replicas to create | `1` | +| `image.repository` | The Keycloak image repository | `docker.io/jboss/keycloak` | +| `image.tag` | Overrides the Keycloak image tag whose default is the chart version | `""` | +| `image.pullPolicy` | The Keycloak image pull policy | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets for the Pod | `[]` | +| `hostAliases` | Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files | `[]` | +| `enableServiceLinks` | Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links | `true` | +| `podManagementPolicy` | Pod management policy. One of `Parallel` or `OrderedReady` | `Parallel` | +| `restartPolicy` | Pod restart policy. One of `Always`, `OnFailure`, or `Never` | `Always` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | `""` | +| `serviceAccount.annotations` | Additional annotations for the ServiceAccount | `{}` | +| `serviceAccount.labels` | Additional labels for the ServiceAccount | `{}` | +| `serviceAccount.imagePullSecrets` | Image pull secrets that are attached to the ServiceAccount | `[]` | +| `rbac.create` | Specifies whether RBAC resources are to be created | `false` +| `rbac.rules` | Custom RBAC rules, e. g. for KUBE_PING | `[]` +| `podSecurityContext` | SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. 
This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) | `{"fsGroup":1000}` | +| `securityContext` | SecurityContext for the Keycloak container | `{"runAsNonRoot":true,"runAsUser":1000}` | +| `extraInitContainers` | Additional init containers, e. g. for providing custom themes | `[]` | +| `extraContainers` | Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy | `[]` | +| `lifecycleHooks` | Lifecycle hooks for the Keycloak container | `{}` | +| `terminationGracePeriodSeconds` | Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance | `60` | +| `clusterDomain` | The internal Kubernetes cluster domain | `cluster.local` | +| `command` | Overrides the default entrypoint of the Keycloak container | `[]` | +| `args` | Overrides the default args for the Keycloak container | `[]` | +| `extraEnv` | Additional environment variables for Keycloak | `""` | +| `extraEnvFrom` | Additional environment variables for Keycloak mapped from a Secret or ConfigMap | `""` | +| `priorityClassName` | Pod priority class name | `""` | +| `affinity` | Pod affinity | Hard node and soft zone anti-affinity | +| `nodeSelector` | Node labels for Pod assignment | `{}` | +| `tolerations` | Node taints to tolerate | `[]` | +| `podLabels` | Additional Pod labels | `{}` | +| `podAnnotations` | Additional Pod annotations | `{}` | +| `livenessProbe` | Liveness probe configuration | `{"httpGet":{"path":"/health/live","port":"http"},"initialDelaySeconds":300,"timeoutSeconds":5}` | +| `readinessProbe` | Readiness probe configuration | `{"httpGet":{"path":"/auth/realms/master","port":"http"},"initialDelaySeconds":30,"timeoutSeconds":1}` | +| `resources` | Pod resource requests and limits | `{}` | +| `startupScripts` | Startup scripts to run before Keycloak starts 
up | `{"keycloak.cli":"{{- .Files.Get "scripts/keycloak.cli" \| nindent 2 }}"}` | +| `extraVolumes` | Add additional volumes, e. g. for custom themes | `""` | +| `extraVolumeMounts` | Add additional volumes mounts, e. g. for custom themes | `""` | +| `extraPorts` | Add additional ports, e. g. for admin console or exposing JGroups ports | `[]` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `statefulsetAnnotations` | Annotations for the StatefulSet | `{}` | +| `statefulsetLabels` | Additional labels for the StatefulSet | `{}` | +| `secrets` | Configuration for secrets that should be created | `{}` | +| `service.annotations` | Annotations for headless and HTTP Services | `{}` | +| `service.labels` | Additional labels for headless and HTTP Services | `{}` | +| `service.type` | The Service type | `ClusterIP` | +| `service.loadBalancerIP` | Optional IP for the load balancer. Used for services of type LoadBalancer only | `""` | +| `loadBalancerSourceRanges` | Optional List of allowed source ranges (CIDRs). Used for service of type LoadBalancer only | `[]` | +| `service.httpPort` | The http Service port | `80` | +| `service.httpNodePort` | The HTTP Service node port if type is NodePort | `""` | +| `service.httpsPort` | The HTTPS Service port | `8443` | +| `service.httpsNodePort` | The HTTPS Service node port if type is NodePort | `""` | +| `service.httpManagementPort` | The WildFly management Service port | `8443` | +| `service.httpManagementNodePort` | The WildFly management node port if type is NodePort | `""` | +| `service.extraPorts` | Additional Service ports, e. g. for custom admin console | `[]` | +| `service.sessionAffinity` | sessionAffinity for Service, e. g. 
"ClientIP" | `""` | +| `service.sessionAffinityConfig` | sessionAffinityConfig for Service | `{}` | +| `ingress.enabled` | If `true`, an Ingress is created | `false` | +| `ingress.rules` | List of Ingress Ingress rule | see below | +| `ingress.rules[0].host` | Host for the Ingress rule | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.rules[0].paths` | Paths for the Ingress rule | `[/]` | +| `ingress.servicePort` | The Service port targeted by the Ingress | `http` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Additional Ingress labels | `{}` | +| `ingress.tls` | TLS configuration | see below | +| `ingress.tls[0].hosts` | List of TLS hosts | `[keycloak.example.com]` | +| `ingress.tls[0].secretName` | Name of the TLS secret | `""` | +| `ingress.console.enabled` | If `true`, an Ingress for the console is created | `false` | +| `ingress.console.rules` | List of Ingress Ingress rule for the console | see below | +| `ingress.console.rules[0].host` | Host for the Ingress rule for the console | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.console.rules[0].paths` | Paths for the Ingress rule for the console | `[/auth/admin]` | +| `ingress.console.annotations` | Ingress annotations for the console | `{}` | +| `networkPolicy.enabled` | If true, the ingress network policy is deployed | `false` +| `networkPolicy.extraFrom` | Allows to define allowed external traffic (see Kubernetes doc for network policy `from` format) | `[]` +| `route.enabled` | If `true`, an OpenShift Route is created | `false` | +| `route.path` | Path for the Route | `/` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Additional Route labels | `{}` | +| `route.host` | Host name for the Route | `""` | +| `route.tls.enabled` | If `true`, TLS is enabled for the Route | `true` | +| `route.tls.insecureEdgeTerminationPolicy` | Insecure edge termination policy of the Route. 
Can be `None`, `Redirect`, or `Allow` | `Redirect` | +| `route.tls.termination` | TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` | `edge` | +| `pgchecker.image.repository` | Docker image used to check Postgresql readiness at startup | `docker.io/busybox` | +| `pgchecker.image.tag` | Image tag for the pgchecker image | `1.32` | +| `pgchecker.image.pullPolicy` | Image pull policy for the pgchecker image | `IfNotPresent` | +| `pgchecker.securityContext` | SecurityContext for the pgchecker container | `{"allowPrivilegeEscalation":false,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | +| `pgchecker.resources` | Resource requests and limits for the pgchecker container | `{"limits":{"cpu":"10m","memory":"16Mi"},"requests":{"cpu":"10m","memory":"16Mi"}}` | +| `postgresql.enabled` | If `true`, the Postgresql dependency is enabled | `true` | +| `postgresql.postgresqlUsername` | PostgreSQL User to create | `keycloak` | +| `postgresql.postgresqlPassword` | PostgreSQL Password for the new user | `keycloak` | +| `postgresql.postgresqlDatabase` | PostgreSQL Database to create | `keycloak` | +| `serviceMonitor.enabled` | If `true`, a ServiceMonitor resource for the prometheus-operator is created | `false` | +| `serviceMonitor.namespace` | Optionally sets a target namespace in which to deploy the ServiceMonitor resource | `""` | +| `serviceMonitor.namespaceSelector` | Optionally sets a namespace selector for the ServiceMonitor | `{}` | +| `serviceMonitor.annotations` | Annotations for the ServiceMonitor | `{}` | +| `serviceMonitor.labels` | Additional labels for the ServiceMonitor | `{}` | +| `serviceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `serviceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `serviceMonitor.path` | The path at which metrics are served | `/metrics` | +| `serviceMonitor.port` | The Service port at which metrics are served | `http` | +| `extraServiceMonitor.enabled` | If `true`, 
an additional ServiceMonitor resource for the prometheus-operator is created. Could be used for additional metrics via [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) | `false` | +| `extraServiceMonitor.namespace` | Optionally sets a target namespace in which to deploy the additional ServiceMonitor resource | `""` | +| `extraServiceMonitor.namespaceSelector` | Optionally sets a namespace selector for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.annotations` | Annotations for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.labels` | Additional labels for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `extraServiceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `extraServiceMonitor.path` | The path at which metrics are served | `/metrics` | +| `extraServiceMonitor.port` | The Service port at which metrics are served | `http` | +| `prometheusRule.enabled` | If `true`, a PrometheusRule resource for the prometheus-operator is created | `false` | +| `prometheusRule.annotations` | Annotations for the PrometheusRule | `{}` | +| `prometheusRule.labels` | Additional labels for the PrometheusRule | `{}` | +| `prometheusRule.rules` | List of rules for Prometheus | `[]` | +| `autoscaling.enabled` | Enable creation of a HorizontalPodAutoscaler resource | `false` | +| `autoscaling.labels` | Additional labels for the HorizontalPodAutoscaler resource | `{}` | +| `autoscaling.minReplicas` | The minimum number of Pods when autoscaling is enabled | `3` | +| `autoscaling.maxReplicas` | The maximum number of Pods when autoscaling is enabled | `10` | +| `autoscaling.metrics` | The metrics configuration for the HorizontalPodAutoscaler | `[{"resource":{"name":"cpu","target":{"averageUtilization":80,"type":"Utilization"}},"type":"Resource"}]` | +| `autoscaling.behavior` | The scaling policy configuration for the HorizontalPodAutoscaler | 
`{"scaleDown":{"policies":[{"periodSeconds":300,"type":"Pods","value":1}],"stabilizationWindowSeconds":300}}` |
+| `test.enabled` | If `true`, test resources are created | `false` |
+| `test.image.repository` | The image for the test Pod | `docker.io/unguiculus/docker-python3-phantomjs-selenium` |
+| `test.image.tag` | The tag for the test Pod image | `v1` |
+| `test.image.pullPolicy` | The image pull policy for the test Pod image | `IfNotPresent` |
+| `test.podSecurityContext` | SecurityContext for the entire test Pod | `{"fsGroup":1000}` |
+| `test.securityContext` | SecurityContext for the test container | `{"runAsNonRoot":true,"runAsUser":1000}` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
+
+```console
+$ helm install keycloak codecentric/keycloak -n keycloak --set replicas=1
+```
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while
+installing the chart. For example:
+
+```console
+$ helm install keycloak codecentric/keycloak -n keycloak --values values.yaml
+```
+
+The chart offers great flexibility.
+It can be configured to work with the official Keycloak Docker image but any custom image can be used as well.
+
+For the official Docker image, please check its configuration at https://github.com/keycloak/keycloak-containers/tree/master/server.
+
+### Usage of the `tpl` Function
+
+The `tpl` function allows us to pass string values from `values.yaml` through the templating engine.
+It is used for the following values:
+
+* `extraInitContainers`
+* `extraContainers`
+* `extraEnv`
+* `extraEnvFrom`
+* `affinity`
+* `extraVolumeMounts`
+* `extraVolumes`
+* `livenessProbe`
+* `readinessProbe`
+
+Additionally, custom labels and annotations can be set on various resources, the values of which are passed through `tpl` as well.
+
+It is important that these values be configured as strings.
+Otherwise, installation will fail.
+See example for Google Cloud Proxy or default affinity configuration in `values.yaml`. + +### JVM Settings + +Keycloak sets the following system properties by default: +`-Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS -Djava.awt.headless=true` + +You can override these by setting the `JAVA_OPTS` environment variable. +Make sure you configure container support. +This allows you to only configure memory using Kubernetes resources and the JVM will automatically adapt. + +```yaml +extraEnv: | + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true +``` + +### Database Setup + +By default, Bitnami's [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart is deployed and used as database. +Please refer to this chart for additional PostgreSQL configuration options. + +#### Using an External Database + +The Keycloak Docker image supports various database types. +Configuration happens in a generic manner. + +##### Using a Secret Managed by the Chart + +The following examples uses a PostgreSQL database with a secret that is managed by the Helm chart. + +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + +extraEnvFrom: | + - secretRef: + name: '{{ include "keycloak.fullname" . }}-db' + +secrets: + db: + stringData: + DB_USER: '{{ .Values.dbUser }}' + DB_PASSWORD: '{{ .Values.dbPassword }}' +``` + +`dbUser` and `dbPassword` are custom values you'd then specify on the commandline using `--set-string`. + +##### Using an Existing Secret + +The following examples uses a PostgreSQL database with a secret. +Username and password are mounted as files. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + - name: DB_USER_FILE + value: /secrets/db-creds/user + - name: DB_PASSWORD_FILE + value: /secrets/db-creds/password + +extraVolumeMounts: | + - name: db-creds + mountPath: /secrets/db-creds + readOnly: true + +extraVolumes: | + - name: db-creds + secret: + secretName: keycloak-db-creds +``` + +### Creating a Keycloak Admin User + +The Keycloak Docker image supports creating an initial admin user. +It must be configured via environment variables: + +* `KEYCLOAK_USER` or `KEYCLOAK_USER_FILE` +* `KEYCLOAK_PASSWORD` or `KEYCLOAK_PASSWORD_FILE` + +Please refer to the section on database configuration for how to configure a secret for this. + +### High Availability and Clustering + +For high availability, Keycloak must be run with multiple replicas (`replicas > 1`). +The chart has a helper template (`keycloak.serviceDnsName`) that creates the DNS name based on the headless service. + +#### DNS_PING Service Discovery + +JGroups discovery via DNS_PING can be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +#### KUBE_PING Service Discovery + +Recent versions of Keycloak include a new Kubernetes native [KUBE_PING](https://github.com/jgroups-extras/jgroups-kubernetes) service discovery protocol. +This requires a little more configuration than DNS_PING but can easily be achieved with the Helm chart. 
+ +As with DNS_PING some environment variables must be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +However, the Keycloak Pods must also get RBAC permissions to `get` and `list` Pods in the namespace which can be configured as follows: + +```yaml +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +``` + +#### Autoscaling + +Due to the caches in Keycloak only replicating to a few nodes (two in the example configuration above) and the limited controls around autoscaling built into Kubernetes, it has historically been problematic to autoscale Keycloak. +However, in Kubernetes 1.18 [additional controls were introduced](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior) which make it possible to scale down in a more controlled manner. + +The example autoscaling configuration in the values file scales from three up to a maximum of ten Pods using CPU utilization as the metric. Scaling up is done as quickly as required but scaling down is done at a maximum rate of one Pod per five minutes. + +Autoscaling can be enabled as follows: + +```yaml +autoscaling: + enabled: true +``` + +KUBE_PING service discovery seems to be the most reliable mechanism to use when enabling autoscaling, due to being faster than DNS_PING at detecting changes in the cluster. 
+ +### Running Keycloak Behind a Reverse Proxy + +When running Keycloak behind a reverse proxy, which is the case when using an ingress controller, +proxy address forwarding must be enabled as follows: + +```yaml +extraEnv: | + - name: PROXY_ADDRESS_FORWARDING + value: "true" +``` + +### Providing a Custom Theme + +One option is certainly to provide a custom Keycloak image that includes the theme. +However, if you prefer to stick with the official Keycloak image, you can use an init container as theme provider. + +Create your own theme and package it up into a Docker image. + +```docker +FROM busybox +COPY mytheme /mytheme +``` + +In combination with an `emptyDir` that is shared with the Keycloak container, configure an init container that runs your theme image and copies the theme over to the right place where Keycloak will pick it up automatically. + +```yaml +extraInitContainers: | + - name: theme-provider + image: myuser/mytheme:1 + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes/mytheme + +extraVolumes: | + - name: theme + emptyDir: {} +``` + +### Setting a Custom Realm + +A realm can be added by creating a secret or configmap for the realm json file and then supplying this into the chart. +It can be mounted using `extraVolumeMounts` and then referenced as environment variable `KEYCLOAK_IMPORT`. 
+First we need to create a Secret from the realm JSON file using `kubectl create secret generic realm-secret --from-file=realm.json` which we need to reference in `values.yaml`: + +```yaml +extraVolumes: | + - name: realm-secret + secret: + secretName: realm-secret + +extraVolumeMounts: | + - name: realm-secret + mountPath: "/realm/" + readOnly: true + +extraEnv: | + - name: KEYCLOAK_IMPORT + value: /realm/realm.json +``` + +Alternatively, the realm file could be added to a custom image. + +After startup the web admin console for the realm should be available on the path /auth/admin/\/console/. + +### Using Google Cloud SQL Proxy + +Depending on your environment you may need a local proxy to connect to the database. +This is, e. g., the case for Google Kubernetes Engine when using Google Cloud SQL. +Create the secret for the credentials as documented [here](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) and configure the proxy as a sidecar. + +Because `extraContainers` is a string that is passed through the `tpl` function, it is possible to create custom values and use them in the string. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +# Custom values for Google Cloud SQL +cloudsql: + project: my-project + region: europe-west1 + instance: my-instance + +extraContainers: | + - name: cloudsql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - /cloud_sql_proxy + args: + - -instances={{ .Values.cloudsql.project }}:{{ .Values.cloudsql.region }}:{{ .Values.cloudsql.instance }}=tcp:5432 + - -credential_file=/secrets/cloudsql/credentials.json + volumeMounts: + - name: cloudsql-creds + mountPath: /secrets/cloudsql + readOnly: true + +extraVolumes: | + - name: cloudsql-creds + secret: + secretName: cloudsql-instance-credentials + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: "127.0.0.1" + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: postgres + - name: DB_USER + value: myuser + - name: DB_PASSWORD + value: mypassword +``` + +### Changing the Context Path + +By default, Keycloak is served under context `/auth`. 
+This can be changed as follows: + +```yaml +contextPath: mycontext + +startupScripts: + # cli script that reconfigures WildFly + contextPath.cli: | + embed-server --server-config=standalone-ha.xml --std-out=echo + batch + {{- if ne .Values.contextPath "auth" }} + /subsystem=keycloak-server/:write-attribute(name=web-context,value={{ if eq .Values.contextPath "" }}/{{ else }}{{ .Values.contextPath }}{{ end }}) + {{- if eq .Values.contextPath "" }} + /subsystem=undertow/server=default-server/host=default-host:write-attribute(name=default-web-module,value=keycloak-server.war) + {{- end }} + {{- end }} + run-batch + stop-embedded-server + +livenessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +readinessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +The above YAML references introduces the custom value `contextPath` which is possible because `startupScripts`, `livenessProbe`, and `readinessProbe` are templated using the `tpl` function. +Note that it must not start with a slash. +Alternatively, you may supply it via CLI flag: + +```console +--set-string contextPath=mycontext +``` + +### Prometheus Metrics Support + +#### WildFly Metrics + +WildFly can expose metrics on the management port. +In order to achieve this, the environment variable `KEYCLOAK_STATISTICS` must be set. + +```yaml +extraEnv: | + - name: KEYCLOAK_STATISTICS + value: all +``` + +Add a ServiceMonitor if using prometheus-operator: + +```yaml +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing the ServiceMonitor and for adding custom Prometheus rules. 
+ +Add annotations if you don't use prometheus-operator: + +```yaml +service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9990" +``` + +#### Keycloak Metrics SPI + +Optionally, it is possible to add [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) via init container. + +A separate `ServiceMonitor` can be enabled to scrape metrics from the SPI: + +```yaml +extraServiceMonitor: + # If `true`, an additional ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing this ServiceMonitor. + +Note that the metrics endpoint is exposed on the HTTP port. +You may want to restrict access to it in your ingress controller configuration. +For ingress-nginx, this could be done as follows: + +```yaml +annotations: + nginx.ingress.kubernetes.io/server-snippet: | + location ~* /auth/realms/[^/]+/metrics { + return 403; + } +``` + +## Why StatefulSet? + +The chart sets node identifiers to the system property `jboss.node.name` which is in fact the pod name. +Node identifiers must not be longer than 23 characters. +This can be problematic because pod names are quite long. +We would have to truncate the chart's fullname to six characters because pods get a 17-character suffix (e. g. `-697f8b7655-mf5ht`). +Using a StatefulSet allows us to truncate to 20 characters leaving room for up to 99 replicas, which is much better. +Additionally, we get stable values for `jboss.node.name` which can be advantageous for cluster discovery. +The headless service that governs the StatefulSet is used for DNS discovery via DNS_PING. + +## Upgrading + +### From chart < 10.0.0 + +* Keycloak is updated to 12.0.4 + +The upgrade should be seemless. +No special care has to be taken. + +### From chart versions < 9.0.0 + +The Keycloak chart received a major facelift and, thus, comes with breaking changes. +Opinionated stuff and things that are now baked into Keycloak's Docker image were removed. 
+Configuration is more generic making it easier to use custom Docker images that are configured differently than the official one.
+
+* Values are no longer nested under `keycloak`.
+* Besides setting the node identifier, no CLI changes are performed out of the box
+* Environment variables for the PostgreSQL dependency are set automatically if enabled.
+  Otherwise, no environment variables are set by default.
+* Optionally enables creating RBAC resources with configurable rules (e. g. for KUBE_PING)
+* PostgreSQL chart dependency is updated to 9.1.1
+
+### From chart versions < 8.0.0
+
+* Keycloak is updated to 10.0.0
+* PostgreSQL chart dependency is updated to 8.9.5
+
+The upgrade should be seamless.
+No special care has to be taken.
+
+### From chart versions < 7.0.0
+
+Version 7.0.0 update breaks backwards-compatibility with the existing `keycloak.persistence.existingSecret` scheme.
+
+#### Changes in Configuring Database Credentials from an Existing Secret
+
+Both `DB_USER` and `DB_PASS` are always read from a Kubernetes Secret.
+This is a requirement if you are provisioning database credentials dynamically - either via an Operator or some secret-management engine.
+
+The variable referencing the password key name has been renamed from `keycloak.persistence.existingSecretKey` to `keycloak.persistence.existingSecretPasswordKey`
+
+A new, optional variable for referencing the username key name for populating the `DB_USER` env has been added:
+`keycloak.persistence.existingSecretUsernameKey`.
+
+If `keycloak.persistence.existingSecret` is left unset, a new Secret will be provisioned populated with the `dbUser` and `dbPassword` Helm variables.
+
+###### Example configuration:
+```yaml
+keycloak:
+  persistence:
+    existingSecret: keycloak-provisioned-db-credentials
+    existingSecretPasswordKey: PGPASSWORD
+    existingSecretUsernameKey: PGUSER
+    ...
+``` +### From chart versions < 6.0.0 + +#### Changes in Probe Configuration + +Now both readiness and liveness probes are configured as strings that are then passed through the `tpl` function. +This allows for greater customizability of the readiness and liveness probes. + +The defaults are unchanged, but since 6.0.0 configured as follows: + +```yaml + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +#### Changes in Existing Secret Configuration + +This can be useful if you create a secret in a parent chart and want to reference that secret. +Applies to `keycloak.existingSecret` and `keycloak.persistence.existingSecret`. + +_`values.yaml` of parent chart:_ +```yaml +keycloak: + keycloak: + existingSecret: '{{ .Release.Name }}-keycloak-secret' +``` + +#### HTTPS Port Added + +The HTTPS port was added to the pod and to the services. +As a result, service ports are now configured differently. + + +### From chart versions < 5.0.0 + +Version 5.0.0 is a major update. + +* The chart now follows the new Kubernetes label recommendations: +https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +* Several changes to the StatefulSet render an out-of-the-box upgrade impossible because StatefulSets only allow updates to a limited set of fields +* The chart uses the new support for running scripts at startup that has been added to Keycloak's Docker image. +If you use this feature, you will have to adjust your configuration + +However, with the following manual steps an automatic upgrade is still possible: + +1. Adjust chart configuration as necessary (e. g. startup scripts) +1. 
Perform a non-cascading deletion of the StatefulSet which keeps the pods running +1. Add the new labels to the pods +1. Run `helm upgrade` + +Use a script like the following to add labels and to delete the StatefulSet: + +```console +#!/bin/sh + +release= +namespace= + +kubectl delete statefulset -n "$namespace" -l app=keycloak -l release="$release" --cascade=false + +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/name=keycloak +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/instance="$release" +``` + +**NOTE:** Version 5.0.0 also updates the Postgresql dependency which has received a major upgrade as well. +In case you use this dependency, the database must be upgraded first. +Please refer to the Postgresql chart's upgrading section in its README for instructions. diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..48d8f2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.8.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.1.1 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/README.md b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..c84cc7b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/README.md @@ -0,0 +1,625 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+ +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). 
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. 
|`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the certificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please convert them to SQL and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. 
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. 
It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. 
Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you will be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/.helmignore b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..b4d8828 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 0.3.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.3.1 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/README.md b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/README.md new file mode 100644 index 0000000..ab50967 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/README.md @@ -0,0 +1,228 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR; + +```yaml +dependencies: + - name: common + version: 0.1.0 + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +**Names** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +**TplValues** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +**Secrets** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +## Notable changes + +N/A diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..c0ea2c7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..ee6673a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} 
+ {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $) }} +*/}} +{{- define "common.images.pullSecrets" -}} +{{- if .global }} +{{- if .global.imagePullSecrets }} +imagePullSecrets: + {{- range .global.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- else }} +{{- $pullSecrets := list }} +{{- range .images }} + {{- if .pullSecrets }} + {{- $pullSecrets = append $pullSecrets .pullSecrets }} + {{- end }} +{{- end }} +{{- if $pullSecrets }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..d6165a2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 
100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..a936299 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/default-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/README.md b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/conf.d/README.md b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). 
diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.lock b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.lock new file mode 100644 index 0000000..1069b62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.3.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-07-15T00:56:02.067804177Z" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..868eee6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/NOTES.txt 
b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..6dec604 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,54 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.imxc.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.imxc.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace imxc --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . 
}} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . 
) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..a7008a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,494 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} 
+{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . 
| quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..b29ef60 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..f21a976 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..6637867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . 
}} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6b7a317 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..b993c99 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..2a7b372 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..da0b3ab --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..b0c41b1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/pv.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/pv.yaml new file mode 100644 index 0000000..ddd7d7c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/pv.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: keycloak-saas +spec: + storageClassName: manual + capacity: + storage: 8Gi + accessModes: + - ReadWriteOnce + #- ReadWriteMany + hostPath: + #path: "/home/keycloak/keycloak" + path: /mnt/keycloak-postgresql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + #- imxc-worker1 + - {{ .Values.node.affinity }} + claimRef: + name: data-keycloak-saas-postgresql-0 + #namespace: auth + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/role.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..6d3cf50 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..b7daa2a --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: imxc +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..c93dbe0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..17f7ff3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..3e643e1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - imxc + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..a712a03 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,340 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if 
.Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..35c6293 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: 
{{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + #claimName: {{ tpl . 
$ }} + claimName: data-keycloak-saas-postgresql-0 +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..4913157 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-read.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..885c7bb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..e9fc504 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values-production.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..a43670f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values-production.yaml @@ -0,0 +1,591 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: postgresql # bitnami/postgresql + tag: 11.8.0-debian-10-r61 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.schema.json b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.yaml new file mode 100644 index 0000000..5f831ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/charts/postgresql/values.yaml @@ -0,0 +1,604 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + #registry: cdm-dev.exem-oss.org/keycloak + registry: 10.10.31.243:5000/keycloak # registry.openstacklocal:5000/keycloak + repository: keycloak-postgresql + tag: 11.8.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + #registry: cdm-dev.exem-oss.org + registry: 10.10.31.243:5000 # registry.openstacklocal:5000 + repository: minideb # keycloak/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data +#postgresqlDataDir: /var/lib/postgresql/data/pgdata + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + 
## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + 
requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +#persistentVolume nodeAffinity Value Require this value +node: + affinity: imxc-worker1 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/h2-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/h2-values.yaml new file mode 100644 index 0000000..10d1705 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/h2-values.yaml @@ -0,0 +1,38 @@ +extraEnv: | + - name: DB_VENDOR + value: h2 + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + annotations: + my-test-annotation: Test secret for {{ include "keycloak.fullname" . }} + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: false + +test: + enabled: true diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/postgres-ha-values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/postgres-ha-values.yaml new file mode 100644 index 0000000..e92c2c7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/ci/postgres-ha-values.yaml @@ -0,0 +1,73 @@ +replicas: 2 + +podLabels: + test-label: test-label-value + +podAnnotations: + test-annotation: test-annotation-value-{{ .Release.Name }} + test-int-annotation: "12345" + +startupScripts: + hello.sh: | + #!/bin/sh + + echo '********************************************************************************' + echo '* *' + echo '* Hello from my startup script! *' + echo '* *' + echo '********************************************************************************' + +lifecycleHooks: | + postStart: + exec: + command: + - /bin/sh + - -c + - echo 'Hello from lifecycle hook!' 
+ +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: KEYCLOAK_STATISTICS + value: all + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: true + persistence: + enabled: true + +test: + enabled: true diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.lock b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.lock new file mode 100644 index 0000000..4231a57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.1 +digest: sha256:33ee9e6caa9e519633071fd71aedd9de7906b9a9d7fb629eb814d9f72bb8d68e +generated: "2020-07-24T07:40:55.78753+02:00" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.yaml new file mode 100644 index 0000000..f3409a3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 9.1.1 + repository: https://charts.bitnami.com/bitnami + 
condition: postgresql.enabled diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/scripts/keycloak.cli b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/scripts/keycloak.cli new file mode 100644 index 0000000..1469963 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/scripts/keycloak.cli @@ -0,0 +1,13 @@ +embed-server --server-config=standalone-ha.xml --std-out=echo +batch + +echo Configuring node identifier + +## Sets the node identifier to the node name (= pod name). Node identifiers have to be unique. They can have a +## maximum length of 23 characters. Thus, the chart's fullname template truncates its length accordingly. +/subsystem=transactions:write-attribute(name=node-identifier, value=${jboss.node.name}) + +echo Finished configuring node identifier + +run-batch +stop-embedded-server diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/NOTES.txt b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/NOTES.txt new file mode 100644 index 0000000..e76e064 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/NOTES.txt @@ -0,0 +1,61 @@ +*********************************************************************** +* * +* Keycloak Helm Chart by codecentric AG * +* * +*********************************************************************** + +{{- if .Values.ingress.enabled }} + +Keycloak was installed with an Ingress and an be reached at the following URL(s): +{{ range $unused, $rule := .Values.ingress.rules }} + {{- range $rule.paths }} + - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $rule.host }}{{ . }} + {{- end }} +{{- end }} + +{{- else if eq "NodePort" .Values.service.type }} + +Keycloak was installed with a Service of type NodePort. +{{ if .Values.service.httpNodePort }} +Get its HTTP URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . 
}}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"http\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} +{{ if .Values.service.httpsNodePort }} +Get its HTTPS URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"https\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} + +{{- else if eq "LoadBalancer" .Values.service.type }} + +Keycloak was installed with a Service of type LoadBalancer + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace imxc service -w {{ include "keycloak.fullname" . }}' + +Get its HTTP URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpPort }}" + +Get its HTTPS URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpsPort }}" + +{{- else if eq "ClusterIP" .Values.service.type }} + +Keycloak was installed with a Service of type ClusterIP + +Create a port-forwarding with the following commands: + +export POD_NAME=$(kubectl get pods --namespace imxc -l "app.kubernetes.io/name={{ include "keycloak.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o name) +echo "Visit http://127.0.0.1:8080 to use your application" +kubectl --namespace imxc port-forward "$POD_NAME" 8080 + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..d019e17 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "keycloak.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate to 20 characters because this is used to set the node identifier in WildFly which is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "keycloak.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "keycloak.labels" -}} +helm.sh/chart: {{ include "keycloak.chart" . }} +{{ include "keycloak.selectorLabels" . 
}} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "keycloak.selectorLabels" -}} +app.kubernetes.io/name: {{ include "keycloak.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for the postgres requirement. +*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- $postgresContext := dict "Values" .Values.postgresql "Release" .Release "Chart" (dict "Name" "postgresql") -}} +{{ include "postgresql.fullname" $postgresContext }} +{{- end }} + +{{/* +Create the service DNS name. +*/}} +{{- define "keycloak.serviceDnsName" -}} +{{ include "keycloak.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "keycloak.ingressAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- print "networking.k8s.io/v1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/configmap-startup.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/configmap-startup.yaml new file mode 100644 index 0000000..8fbb462 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/configmap-startup.yaml @@ -0,0 +1,14 @@ +{{- if .Values.startupScripts }} +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-startup + labels: + {{- include "keycloak.labels" . | nindent 4 }} +data: + {{- range $key, $value := .Values.startupScripts }} + {{ $key }}: | + {{- tpl $value $ | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/hpa.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/hpa.yaml new file mode 100644 index 0000000..c772b76 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.autoscaling.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "keycloak.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/ingress.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d749e24 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/ingress.yaml @@ -0,0 +1,104 @@ +{{- $ingress := .Values.ingress -}} +{{- if $ingress.enabled -}} +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- if $ingress.console.enabled }} +--- +apiVersion: {{ include "keycloak.ingressAPIVersion" . 
}} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }}-console + {{- with $ingress.console.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.console.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/networkpolicy.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..5e7c7b6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/networkpolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "keycloak.fullname" . | quote }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.networkPolicy.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + ingress: + {{- with .Values.networkPolicy.extraFrom }} + - from: + {{- toYaml . | nindent 8 }} + ports: + - protocol: TCP + port: {{ $.Values.service.httpPort }} + - protocol: TCP + port: {{ $.Values.service.httpsPort }} + {{ range $.Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} + {{- end }} + - from: + - podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.service.httpPort }} + - protocol: TCP + port: {{ .Values.service.httpsPort }} + - protocol: TCP + port: {{ .Values.service.httpManagementPort }} + {{ range .Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..39cc390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/prometheusrule.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/prometheusrule.yaml new file mode 100644 index 0000000..69af5e7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- with .Values.prometheusRule -}} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "keycloak.fullname" $ }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "keycloak.fullname" $ }} + rules: + {{- toYaml .rules | nindent 8 }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/rbac.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/rbac.yaml new file mode 100644 index 0000000..9ca0a2b --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/rbac.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create .Values.rbac.rules }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +rules: + {{- toYaml .Values.rbac.rules | nindent 2 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "keycloak.fullname" . 
}} +subjects: + - kind: ServiceAccount + name: {{ include "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/route.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/route.yaml new file mode 100644 index 0000000..9507d56 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/route.yaml @@ -0,0 +1,34 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $route.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $route.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $route.host }} + host: {{ tpl $route.host $ | quote }} +{{- end }} + path: {{ $route.path }} + port: + targetPort: http + to: + kind: Service + name: {{ include "keycloak.fullname" $ }}-http + weight: 100 + {{- if $route.tls.enabled }} + tls: + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + termination: {{ $route.tls.termination }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/secrets.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/secrets.yaml new file mode 100644 index 0000000..c1cb796 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- range $nameSuffix, $values := .Values.secrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $nameSuffix }} + {{- with $values.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := $values.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +type: {{ default "Opaque" $values.type }} +{{- with $values.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with $values.stringData }} +stringData: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 2 }} + {{- end }} +{{- end }} +--- +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-headless.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-headless.yaml new file mode 100644 index 0000000..0c22ec9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-headless + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: headless +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + selector: + {{- include "keycloak.selectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-http.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-http.yaml new file mode 100644 index 0000000..c4a1dc9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/service-http.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-http + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.service.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: http +spec: + type: {{ .Values.service.type }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + protocol: TCP + - name: https + port: {{ .Values.service.httpsPort }} + targetPort: https + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpsNodePort }} + nodePort: {{ .Values.service.httpsNodePort }} + {{- end }} + protocol: TCP + - name: http-management + port: {{ .Values.service.httpManagementPort }} + targetPort: http-management + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpManagementNodePort }} + nodePort: {{ .Values.service.httpManagementNodePort }} + {{- end }} + protocol: TCP + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "keycloak.selectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/serviceaccount.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..1d8f3f0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "keycloak.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceAccount.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +imagePullSecrets: + {{- toYaml .Values.serviceAccount.imagePullSecrets | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/servicemonitor.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..ba97f62 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- range $key, $serviceMonitor := dict "wildfly" .Values.serviceMonitor "extra" .Values.extraServiceMonitor }} +{{- with $serviceMonitor }} +{{- if .enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $key }} + {{- with .namespace }} + namespace: {{ . }} + {{- end }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + {{- with .namespaceSelector }} + namespaceSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "keycloak.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/component: http + endpoints: + - port: {{ .port }} + path: {{ .path }} + interval: {{ .interval }} + scrapeTimeout: {{ .scrapeTimeout }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/statefulset.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..8278986 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/statefulset.yaml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with .Values.statefulsetAnnotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.statefulsetLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + serviceName: {{ include "keycloak.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config-startup: {{ include (print .Template.BasePath "/configmap-startup.yaml") . 
| sha256sum }} + checksum/secrets: {{ tpl (toYaml .Values.secrets) . | sha256sum }} + {{- range $key, $value := .Values.podAnnotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + labels: + {{- include "keycloak.selectorLabels" . | nindent 8 }} + {{- if and .Values.postgresql.enabled (and .Values.postgresql.networkPolicy .Values.postgresql.networkPolicy.enabled) }} + {{ include "keycloak.postgresql.fullname" . }}-client: "true" + {{- end }} + {{- range $key, $value := .Values.podLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + spec: + {{- if or .Values.postgresql.enabled .Values.extraInitContainers }} + initContainers: + {{- if .Values.postgresql.enabled }} + - name: pgchecker + image: "{{ .Values.pgchecker.image.repository }}:{{ .Values.pgchecker.image.tag }}" + imagePullPolicy: {{ .Values.pgchecker.image.pullPolicy }} + securityContext: + {{- toYaml .Values.pgchecker.securityContext | nindent 12 }} + command: + - sh + - -c + - | + echo 'Waiting for PostgreSQL to become ready...' + + until printf "." && nc -z -w 2 {{ include "keycloak.postgresql.fullname" . }} {{ .Values.postgresql.service.port }}; do + sleep 2; + done; + + echo 'PostgreSQL OK ✓' + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + resources: + {{- toYaml .Values.pgchecker.resources | nindent 12 }} + {{- end }} + {{- with .Values.extraInitContainers }} + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: keycloak + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.command | nindent 12 }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- with .Values.lifecycleHooks }} + {{- tpl . 
$ | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_USER + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_USERNAME + - name: KEYCLOAK_PASSWORD + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_PASSWORD + {{- if .Values.postgresql.enabled }} + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: {{ include "keycloak.postgresql.fullname" . }} + - name: DB_PORT + value: {{ .Values.postgresql.service.port | quote }} + - name: DB_DATABASE + value: {{ .Values.postgresql.postgresqlDatabase | quote }} + - name: DB_USER + value: {{ .Values.postgresql.postgresqlUsername | quote }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.postgresql.fullname" . }} + key: postgresql-password + {{- end }} + {{- with .Values.extraEnv }} + {{- tpl . $ | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.extraEnvFrom }} + {{- tpl . $ | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: https + containerPort: 8443 + protocol: TCP + - name: http-management + containerPort: 9990 + protocol: TCP + {{- with .Values.extraPorts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + {{- range $key, $value := .Values.startupScripts }} + - name: startup + mountPath: "/opt/jboss/startup-scripts/{{ $key }}" + subPath: "{{ $key }}" + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- tpl . 
$ | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "keycloak.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + restartPolicy: {{ .Values.restartPolicy }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: themes-upper-directory + hostPath: + path: /root/oci/infra-set/keycloak/keycloak_theme/ + type: DirectoryOrCreate + {{- with .Values.startupScripts }} + - name: startup + configMap: + name: {{ include "keycloak.fullname" $ }}-startup + defaultMode: 0555 + items: + {{- range $key, $value := . }} + - key: {{ $key }} + path: {{ $key }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- tpl . $ | nindent 8 }} + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/configmap-test.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/configmap-test.yaml new file mode 100644 index 0000000..8dda781 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/configmap-test.yaml @@ -0,0 +1,50 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: test + helm.sh/hook-delete-policy: hook-succeeded +data: + test.py: | + import os + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions + from urllib.parse import urlparse + + print('Creating PhantomJS driver...') + driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log') + + base_url = 'http://{{ include "keycloak.fullname" . }}-http{{ if ne 80 (int .Values.service.httpPort) }}:{{ .Values.service.httpPort }}{{ end }}' + + print('Opening Keycloak...') + driver.get('{0}/auth/admin/'.format(base_url)) + + username = os.environ['KEYCLOAK_USER'] + password = os.environ['KEYCLOAK_PASSWORD'] + + username_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "username"))) + password_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "password"))) + login_button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "kc-login"))) + + print('Entering username...') + username_input.send_keys(username) + + print('Entering password...') + password_input.send_keys(password) + + print('Clicking login button...') + login_button.click() + + WebDriverWait(driver, 30).until(lambda driver: '/auth/admin/master/console/' in driver.current_url) + + print('Admin console visible. 
Login successful.') + + driver.quit() + + {{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/pod-test.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/pod-test.yaml new file mode 100644 index 0000000..5b166f2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/templates/test/pod-test.yaml @@ -0,0 +1,43 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: test + annotations: + helm.sh/hook: test +spec: + securityContext: + {{- toYaml .Values.test.podSecurityContext | nindent 4 }} + containers: + - name: keycloak-test + image: "{{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}" + imagePullPolicy: {{ .Values.test.image.pullPolicy }} + securityContext: + {{- toYaml .Values.test.securityContext | nindent 8 }} + command: + - python3 + args: + - /tests/test.py + env: + - name: KEYCLOAK_USER + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: user + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: password + volumeMounts: + - name: tests + mountPath: /tests + volumes: + - name: tests + configMap: + name: {{ include "keycloak.fullname" . 
}}-test + restartPolicy: Never +{{- end }} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.schema.json b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.schema.json new file mode 100644 index 0000000..47c2aa3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.schema.json @@ -0,0 +1,434 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "required": [ + "image" + ], + "definitions": { + "image": { + "type": "object", + "required": [ + "repository", + "tag" + ], + "properties": { + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + } + }, + "properties": { + "affinity": { + "type": "string" + }, + "args": { + "type": "array" + }, + "clusterDomain": { + "type": "string" + }, + "command": { + "type": "array" + }, + "enableServiceLinks": { + "type": "boolean" + }, + "extraContainers": { + "type": "string" + }, + "extraEnv": { + "type": "string" + }, + "extraEnvFrom": { + "type": "string" + }, + "extraInitContainers": { + "type": "string" + }, + "extraPorts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "string" + }, + "extraVolumes": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "hostAliases": { + "type": "array" + }, + "image": { + "$ref": "#/definitions/image" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + 
"type": "string" + } + } + } + } + }, + "servicePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "items": { + "type": "string" + } + }, + "secretName": { + "type": "string" + } + } + } + } + } + }, + "lifecycleHooks": { + "type": "string" + }, + "livenessProbe": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "pgchecker": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/image" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "type": "object" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "podSecurityContext": { + "type": "object" + }, + "postgresql": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheusRule": { + "type": "object" + }, + "serviceMonitor": { + "type": "object" + }, + "extraServiceMonitor": { + "type": "object" + }, + "readinessProbe": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "restartPolicy": { + "type": "string" + }, + "route": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "path": { + "type": "string" + }, + "tls": { + "type": "object" + } + } + }, + "secrets": { + "type": 
"object" + }, + "securityContext": { + "type": "object" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "extraPorts": { + "type": "array" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "httpNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpPort": { + "type": "integer" + }, + "httpsNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpsPort": { + "type": "integer" + }, + "httpManagementNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpManagementPort": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "type": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "sessionAffinity": { + "type": "string" + }, + "sessionAffinityConfig": { + "type": "object" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": "boolean" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "labels": { + "type": "object" + }, + "name": { + "type": "string" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "rules": { + "type": "array" + } + } + }, + "startupScripts": { + "type": "object" + }, + "statefulsetAnnotations": { + "type": "object" + }, + "statefulsetLabels": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "metrics": { + "type": "array" + }, + "behavior": { + "type": "object" + } + } + }, + "test": { + "type": "object", + 
"properties": { + "enabled": { + "type": "boolean" + }, + "image": { + "$ref": "#/definitions/image" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array" + } + } + } +} diff --git a/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.yaml new file mode 100644 index 0000000..7b3dc78 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/04-keycloak/values.yaml @@ -0,0 +1,552 @@ +# Optionally override the fully qualified name +fullnameOverride: "imxc-keycloak" + +# Optionally override the name +nameOverride: "" + +# The number of replicas to create (has no effect if autoscaling enabled) +replicas: 2 + +image: + # The Keycloak image repository + #repository: cdm-dev.exem-oss.org/keycloak/keycloak + repository: 10.10.31.243:5000/cmoa3/keycloak + # Overrides the Keycloak image tag whose default is the chart version + tag: "11.0.1" + # The Keycloak image pull policy + pullPolicy: Always + +# Image pull secrets for the Pod +#imagePullSecrets: [] +# - name: myRegistrKeySecretName +imagePullSecrets: + - name: regcred + +# Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files +hostAliases: [] +# - ip: "1.2.3.4" +# hostnames: +# - "my.host.com" + +# Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links +enableServiceLinks: true + +# Pod management policy. One of `Parallel` or `OrderedReady` +podManagementPolicy: Parallel + +# Pod restart policy. One of `Always`, `OnFailure`, or `Never` +restartPolicy: Always + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + # Additional annotations for the ServiceAccount + annotations: {} + # Additional labels for the ServiceAccount + labels: {} + # Image pull secrets that are attached to the ServiceAccount + #imagePullSecrets: [] + imagePullSecrets: + - name: regcred + +rbac: + create: true + rules: + # RBAC rules for KUBE_PING + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + +# SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) +podSecurityContext: + fsGroup: 1000 + +# SecurityContext for the Keycloak container +securityContext: + runAsUser: 1000 + runAsNonRoot: true + +# Additional init containers, e. g. for providing custom themes +extraInitContainers: | + - name: theme-provider + image: 10.10.31.243:5000/cmoa3/theme-provider:latest + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme ..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +#extraInitContainers: "" + +# Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: "" + +# Lifecycle hooks for the Keycloak container +lifecycleHooks: | +# postStart: +# exec: +# command: +# - /bin/sh +# - -c +# - ls + +# Termination grace period in seconds for Keycloak shutdown. 
Clusters with a large cache might need to extend this to give Infinispan more time to rebalance +terminationGracePeriodSeconds: 60 + +# The internal Kubernetes cluster domain +clusterDomain: cluster.local + +## Overrides the default entrypoint of the Keycloak container +command: [] + +## Overrides the default args for the Keycloak container +#args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled", "-Dkeycloak.profile.feature.admin_fine_grained_authz=enabled"] +args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled"] + +# Additional environment variables for Keycloak +extraEnv: | + # HA settings + - name: PROXY_ADDRESS_FORWARDING + value: "true" + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + # postgresql settings + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: keycloak + - name: DB_USER + value: admin + - name: DB_PASSWORD + value: eorbahrhkswp +# - name: KEYCLOAK_USER +# value: keycloak +# - name: KEYCLOAK_PASSWORD +# value: keycloak +#extraEnv: "" + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS_COUNT + # value: "2" + # - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + # value: "2" +#extraEnv: | +# - name: JGROUPS_DISCOVERY_PROTOCOL +# value: dns.DNS_PING +# - name: JGROUPS_DISCOVERY_PROPERTIES +# value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' +# - name: CACHE_OWNERS_COUNT +# value: "2" +# - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT +# value: "2" +# Additional environment variables for Keycloak mapped from Secret or ConfigMap +extraEnvFrom: "" + +# Pod priority class name +#priorityClassName: "manual" + +# Pod affinity +affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 12 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + +#affinity: {} + +# Node labels for Pod assignment +nodeSelector: {} + +# Node taints to tolerate +tolerations: [] + +# Additional Pod labels +podLabels: {} + +# Additional Pod annotations +podAnnotations: {} + +# Liveness probe configuration +livenessProbe: | + httpGet: + path: /auth/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +# Readiness probe configuration +readinessProbe: | + httpGet: + path: /auth/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + +# Pod resource requests and limits +#resources: {} + # requests: + # cpu: "500m" + # memory: "1024Mi" + # limits: + # cpu: "500m" + # memory: "1024Mi" +resources: + requests: + memory: "600Mi" + cpu: "10m" + +# Startup scripts to run before Keycloak starts up +startupScripts: + # WildFly CLI script for configuring the node-identifier + keycloak.cli: | + {{- .Files.Get "scripts/keycloak.cli" }} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + +# Add additional volumes, e. g. 
for custom themes +extraVolumes: | + - name: theme + emptyDir: {} +#extraVolumes: "" + +# Add additional volumes mounts, e. g. for custom themes +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes +#extraVolumeMounts: "" + +# Add additional ports, e. g. for admin console or exposing JGroups ports +extraPorts: [] + +# Pod disruption budget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +# Annotations for the StatefulSet +statefulsetAnnotations: {} + +# Additional labels for the StatefulSet +statefulsetLabels: {} + +# Configuration for secrets that should be created +secrets: {} + # mysecret: + # type: {} + # annotations: {} + # labels: {} + # stringData: {} + # data: {} + +service: + # Annotations for headless and HTTP Services + annotations: {} + # Additional labels for headless and HTTP Services + labels: {} + # key: value + # The Service type + type: NodePort + # Optional IP for the load balancer. Used for services of type LoadBalancer only + loadBalancerIP: "" + # The http Service port + httpPort: 80 + # The HTTP Service node port if type is NodePort + httpNodePort: 31082 + # The HTTPS Service port + httpsPort: 8443 + # The HTTPS Service node port if type is NodePort + httpsNodePort: null + # The WildFly management Service port + httpManagementPort: 9990 + # The WildFly management Service node port if type is NodePort + httpManagementNodePort: 31990 + # Additional Service ports, e. g. for custom admin console + extraPorts: [] + # When using Service type LoadBalancer, you can restrict source ranges allowed + # to connect to the LoadBalancer, e. g. 
will result in Security Groups + # (or equivalent) with inbound source ranges allowed to connect + loadBalancerSourceRanges: [] + # Session affinity + # See https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace + sessionAffinity: "" + # Session affinity config + sessionAffinityConfig: {} + +ingress: + # If `true`, an Ingress is created + enabled: false + # The Service port targeted by the Ingress + servicePort: http + # Ingress annotations + annotations: {} + ## Resolve HTTP 502 error using ingress-nginx: + ## See https://www.ibm.com/support/pages/502-error-ingress-keycloak-response + # nginx.ingress.kubernetes.io/proxy-buffer-size: 128k + + # Additional Ingress labels + labels: {} + # List of rules for the Ingress + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - / + # TLS configuration + tls: + - hosts: + - keycloak.example.com + secretName: "" + + # ingress for console only (/auth/admin) + console: + # If `true`, an Ingress is created for console path only + enabled: false + # Ingress annotations for console ingress only + # Useful to set nginx.ingress.kubernetes.io/whitelist-source-range particularly + annotations: {} + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - /auth/admin/ + +## Network policy configuration +networkPolicy: + # If true, the Network policies are deployed + enabled: false + + # Additional Network policy labels + labels: {} + + # Define all other external allowed source + # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#networkpolicypeer-v1-networking-k8s-io + extraFrom: [] + +route: + # If `true`, an OpenShift Route is created + enabled: false + # Path for the Route + path: / + # Route annotations + annotations: {} + # Additional Route labels + labels: {} + # Host name for the Route + host: "" + # TLS configuration + tls: + # If `true`, TLS is enabled 
for the Route + enabled: false + # Insecure edge termination policy of the Route. Can be `None`, `Redirect`, or `Allow` + insecureEdgeTerminationPolicy: Redirect + # TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` + termination: edge + +pgchecker: + image: + # Docker image used to check Postgresql readiness at startup + #repository: cdm-dev.exem-oss.org/keycloak/busybox + #repository: {{ .Values.global.IMXC_REGISTRY }}/keycloak/busybox + repository: 10.10.31.243:5000/cmoa3/busybox + # Image tag for the pgchecker image + tag: 1.32 + # Image pull policy for the pgchecker image + pullPolicy: Always + # SecurityContext for the pgchecker contai/docker.ner + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + # Resource requests and limits for the pgchecker container + resources: + requests: + cpu: "10m" + memory: "16Mi" + limits: + cpu: "10m" + memory: "16Mi" + +postgresql: + # If `true`, the Postgresql dependency is enabled + enabled: false + # PostgreSQL User to create + postgresqlUsername: keycloak + # PostgreSQL Password for the new user + postgresqlPassword: keycloak + # PostgreSQL Database to create + postgresqlDatabase: keycloak + # PostgreSQL network policy configuration + networkPolicy: + enabled: false + +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /metrics + # The Service port at which metrics are served + port: http-management + +extraServiceMonitor: + # If 
`true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /auth/realms/master/metrics + # The Service port at which metrics are served + port: http + +prometheusRule: + # If `true`, a PrometheusRule resource for the prometheus-operator is created + enabled: false + # Annotations for the PrometheusRule + annotations: {} + # Additional labels for the PrometheusRule + labels: {} + # List of rules for Prometheus + rules: [] + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
+ # expr: | + # ( + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m] + # ) + # ) + # / + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m] + # ) + # ) + # ) * 100 > 1 + # for: 5m + # labels: + # severity: warning + +autoscaling: + # If `true`, a autoscaling/v2beta2 HorizontalPodAutoscaler resource is created (requires Kubernetes 1.18 or above) + # Autoscaling seems to be most reliable when using KUBE_PING service discovery (see README for details) + # This disables the `replicas` field in the StatefulSet + enabled: false + # Additional HorizontalPodAutoscaler labels + labels: {} + # The minimum and maximum number of replicas for the Keycloak StatefulSet + minReplicas: 3 + maxReplicas: 10 + # The metrics to use for scaling + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + # The scaling policy to use. This will scale up quickly but only scale down a single Pod per 5 minutes. + # This is important because caches are usually only replicated to 2 Pods and if one of those Pods is terminated this will give the cluster time to recover. 
+ behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 300 + +test: + # If `true`, test resources are created + enabled: false + image: + # The image for the test Pod + #repository: docker.io/unguiculus/docker-python3-phantomjs-selenium + repository: 10.10.31.243:5000/docker-python3-phantomjs-selenium + # The tag for the test Pod image + tag: v1 + # The image pull policy for the test Pod image + pullPolicy: IfNotPresent + # SecurityContext for the entire test Pod + podSecurityContext: + fsGroup: 1000 + # SecurityContext for the test container + securityContext: + runAsUser: 1000 + runAsNonRoot: true + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-api-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-api-server.sh new file mode 100644 index 0000000..78a9962 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-api-server.sh @@ -0,0 +1,17 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + /sbin/tini -- java -Djava.security.egd=file:/dev/./urandom -jar /app.jar + #java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init.json b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + 
"ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": 
"2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + 
"webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + 
"directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + 
"nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + 
"email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": 
"openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + 
"protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + 
"attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": 
"${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", 
+ "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + 
"claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + 
"name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": 
"birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + 
"contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": 
"dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": 
"direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/auth-server.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/auth-server.yaml new file mode 100644 index 0000000..170ece2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/auth-server.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-server + namespace: imxc +spec: + selector: + matchLabels: + app: auth + replicas: 1 + template: + metadata: + labels: + app: auth + spec: + initContainers: + - name: init-resource + image: {{ .Values.global.IMXC_IN_REGISTRY }}/init-resource:latest + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ['chmod -R 777 /scripts; cp /scripts/init.json /tmp/init.json'] + volumeMounts: + - name: init + mountPath: /tmp + containers: + - name: auth-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/auth-server:{{ 
.Values.global.AUTH_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-auth-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # imxc-api-server configuration + - name: IMXC_API-SERVER-URL + value: http://imxc-api-service:8080 + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_REPO + value: debug + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_AUTH_AUTHENTICATION_USER_SERVICE + value: debug + # 현대카드는 커스텀으로 해당 값 추가. keycloak만 사용(true), keycloak+내부db 사용(false) + - name: IMXC_KEYCLOAK_ENABLED + value: "true" + + volumeMounts: + - name: init + mountPath: /tmp + resources: + requests: + memory: "300Mi" + cpu: "10m" + + volumes: + - name: init + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: auth-server-service + namespace: imxc +spec: + type: ClusterIP + selector: + app: auth + ports: + - protocol: TCP + port: 8480 + # nodePort: 15016 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-datagate.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-datagate.yaml new file mode 100644 index 0000000..dacf3b0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-datagate.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + selector: + matchLabels: + app: datagate + replicas: 2 + template: + metadata: + labels: + app: datagate + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/datagate:{{ .Values.global.DATAGATE_VERSION }} + imagePullPolicy: IfNotPresent 
+ name: datagate + ports: + - containerPort: 50051 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! + - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: kafka-broker:9094 + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + cpu: "500m" + memory: "100Mi" + limits: + cpu: "2000m" + memory: "1Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + ports: + - name: datagate-grpc + port: 50051 + protocol: TCP + targetPort: 50051 + nodePort: 30051 + - name: datagate-http + port: 14268 + targetPort: 14268 +# nodePort: 31268 + - name: datagate-readiness + port: 14269 + targetPort: 14269 + selector: + app: datagate + type: NodePort diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-agent.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-agent.yaml new file mode 100644 index 0000000..9add858 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-agent.yaml @@ -0,0 +1,331 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + selector: + matchLabels: + app: metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: metric-agent + spec: + containers: + - name: metric-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-agent:{{ .Values.global.METRIC_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14271 + - containerPort: 14272 + args: + - --config.file=/etc/metric-agent/metric-agent.yml + env: + - name: STORAGE_TYPE + value: datagate + - name: DATAGATE 
+ value: datagate:50051 + - name: CLUSTER_ID + value: cloudmoa +# - name: USER_ID +# value: mskim@ex-em.com + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "300m" + volumes: + - name: config-volume + configMap: + name: metric-agent-config + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + ports: + - name: metric + port: 14271 + targetPort: 14271 + selector: + app: metric-agent + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metric-agent-config + namespace: imxc +data: + metric-agent.yml: | + global: + scrape_interval: 10s + evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. + + scrape_configs: + - job_name: 'kubernetes-kubelet' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 'cloudmoa' + - target_label: xm_entity_type + replacement: 'Node' + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + + - job_name: 'kubernetes-node-exporter' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: 
node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: 'kubernetes-(.*)' + replacement: '${1}' + target_label: name + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Node' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: 'kubernetes-cadvisor' + scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Container' + +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + + {{- else }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +{{- end }} + # CLOUD-8671 | 데이터 필터링 설정 추가 + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop + + - job_name: 'kafka-consumer' + metrics_path: /remote_prom + scrape_interval: 5s + scrape_timeout: 5s + scheme: kafka + static_configs: + - targets: ['kafka-broker:9094'] + params: + #server_addrs: ['broker.default.svc.k8s:9094'] + server_addrs: ['kafka-broker:9094'] + encoding: [proto3] + contents: [remote_write] + compression: [snappy] + group: [remote-write-consumer] + workers: [50] + + # job for API server (SpringBoot) commented by ersione 2019-09-19 + - job_name: 'imxc-api' + metrics_path: '/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: ['imxc-api-service:8080'] + - job_name: 'imxc-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] + #- job_name: 'imxc-auth' + # metrics_path: '/actuator/prometheus' + # scrape_interval: 15s + # static_configs: + # - targets: ['auth-server-service:8480'] + + + + - job_name: 'alertmanager-exporter' + metrics_path: '/metrics' + scrape_interval: 5s + static_configs: + - targets: ['alertmanager:9093'] + + + # modified by seungtak choi 2020-02-18 + - job_name: 'cmoa-collector' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + 
namespaces: + names: + - imxc + relabel_configs: + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: cmoa-collector + + # added by dwkim 2021-03-15 + - job_name: 'elasticsearch' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + relabel_configs: + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_pod_node_name] + target_label: xm_node_id + - source_labels: [__meta_kubernetes_namespace] + target_label: xm_namespace + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: es-exporter-elasticsearch-exporter + + # kafka-exporter prometheus 수집 룰 추가 + - job_name: 'kafka-exporter' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9308' + + # kafka-jmx-exporter configuration yaml 수집룰 추가 + - job_name: 'kafka-jmx' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9010' + + # job for API Server(Spring Cloud Notification Server) commented by hjyoon 2022-01-26 + - job_name: 'cmoa-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-collector.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-collector.yaml new file mode 100644 index 0000000..3d7acc8 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cloudmoa-metric-collector.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + selector: + matchLabels: + app: metric-collector + replicas: 3 + template: + metadata: + labels: + app: metric-collector + spec: + containers: + - name: metric-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-collector:{{ .Values.global.METRIC_COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14270 + env: + - name: KAFKA_CONSUMER_BROKERS + value: kafka-broker:9094 + - name: HTTP_PUSH + value: http://base-cortex-nginx/api/v1/push + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + ports: + - name: metric + port: 14270 + targetPort: 14270 + selector: + app: metric-collector diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-batch.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-batch.yaml new file mode 100644 index 0000000..b20fed2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-batch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-batch + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-batch +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-batch + template: + metadata: + labels: + app: cmoa-kube-info-batch + spec: + containers: + - name: cmoa-kube-info-batch + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-batch:{{ .Values.global.KUBE_INFO_BATCH_VERSION }} + imagePullPolicy: Always + env: + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: 
{{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: DELETE_HOUR + value: '{{ .Values.global.DELETE_HOUR }}' diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-connector.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-connector.yaml new file mode 100644 index 0000000..cad91b9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-connector.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-connector + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-connector +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-connector + template: + metadata: + labels: + app: cmoa-kube-info-connector + spec: + containers: + - name: cmoa-kube-info-connector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-connector:{{ .Values.global.KUBE_INFO_CONNECTOR_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_GROUP_ID + value: cmoa-kube-info-connector + - name: KAFKA_SERVER + value: kafka:9092 + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: MAX_POLL_RECORDS_CONFIG + value: "300" + - name: MAX_POLL_INTERVAL_MS_CONFIG + value: "600000" + - name: SESSION_TIMEOUT_MS_CONFIG + value: "60000" + - name: MAX_PARTITION_FETCH_BYTES_CONFIG + value: "5242880" diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-flat.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-flat.yaml new file mode 100644 index 0000000..51fbb35 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-kube-info-flat.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-flat + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-flat +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-flat + template: + metadata: + labels: + app: cmoa-kube-info-flat + spec: + containers: + - name: cmoa-kube-info-flat + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-flat:{{ .Values.global.KUBE_INFO_FLAT_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_SERVER + value: kafka:9092 + - name: KAFKA_INPUT_TOPIC + value: {{ .Values.global.KAFKA_INPUT_TOPIC }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + resources: + limits: + memory: 1Gi + requests: + memory: 1Gi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + 
port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/eureka-server.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/eureka-server.yaml new file mode 100644 index 0000000..1b54313 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/eureka-server.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Service +metadata: + name: eureka + namespace: imxc + labels: + app: eureka +spec: + type: NodePort + ports: + - port: 8761 + targetPort: 8761 + nodePort: 30030 + name: eureka + selector: + app: eureka +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: eureka + namespace: imxc +spec: + serviceName: 'eureka' + replicas: 3 + selector: + matchLabels: + app: eureka + template: + metadata: + labels: + app: eureka + spec: + containers: + - name: eureka + image: {{ .Values.global.IMXC_IN_REGISTRY }}/eureka-server:{{ .Values.global.EUREKA_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8761 + #resources: + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "1200Mi" + # cpu: "500m" + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/ + - name: JVM_OPTS + value: "-Xms1g -Xmx1g" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "300Mi" + cpu: "20m" diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-api-server.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-api-server.yaml new file mode 100644 index 0000000..8cdb2e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-api-server.yaml @@ -0,0 +1,245 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-api-service + 
namespace: imxc +spec: + type: NodePort + selector: + app: imxc-api + ports: + - protocol: TCP + name: api + port: 8080 + targetPort: 8080 + nodePort: 32080 + - protocol: TCP + name: netty + port: 10100 + targetPort: 10100 + nodePort: 31100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-api + namespace: imxc + labels: + app: imxc-api +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-api + template: + metadata: + labels: + app: imxc-api + build: develop + spec: + securityContext: + #runAsNonRoot: true + runAsUser: 1577 + initContainers: + - name: cloudmoa-api-permission-fix + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 +# - sh +# - -c +# - "chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log" + volumeMounts: + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + containers: + - name: imxc-api + image: {{ .Values.global.IMXC_IN_REGISTRY }}/api-server:{{ .Values.global.API_SERVER_VERSION }} + resources: + requests: + cpu: 1600m + memory: 1500Mi + limits: + cpu: 2000m + memory: 5000Mi + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-api-server.sh" | quote }}] + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + - name: SPRING_DATAGATE_URLS + value: "{{ .Values.global.DATAGATE_INSIDE_IP }}" + - name: SPRING_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_INSIDE_PORT }}" + - name: SPRING_REDIS_URLS + value: {{ .Values.global.REDIS_URLS }} + - name: SPRING_REDIS_PORT + value: "{{ .Values.global.REDIS_PORT }}" + - name: SPRING_REDIS_PASSWORD + value: {{ .Values.global.REDIS_PASSWORD }} + - name: SPRING_DATASOURCE_URL + value: 
jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + - name: SPRING_BOOT_ADMIN_CLIENT_URL + value: http://{{ .Values.global.IMXC_ADMIN_SERVER_DNS }}:8888 + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_NAME + value: Intermax Cloud API Server + - name: SPRING_BOOT_ADMIN_CLIENT_ENABLED + value: "false" + - name: OPENTRACING_JAEGER_ENABLED + value: "false" + - name: SPRING_JPA_PROPERTIES_HIBERNATE_GENERATE_STATISTICS + value: "false" + - name: IMXC_REPORT_ENABLED + value: "true" + - name: IMXC_ALERT_PERSIST + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_ENVIRONMENT + value: Demo + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_PREFERIP + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_PODNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: SPRING_BOOT_ADMIN_CLIENT_AUTODEREGISTRATION + value: "true" + - name: SPRING_JPA_HIBERNATE_DDL-AUTO + value: validate + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + - name: KEYCLOAK_RESOURCE + value: "{{ .Values.global.KEYCLOAK_RESOURCE }}" + - name: SPRING_KEYCLOAK_MASTER_USERNAME + value: "{{ .Values.global.KEYCLOAK_MASTER_USERNAME }}" + - name: SPRING_KEYCLOAK_MASTER_PASSWORD + value: "{{ .Values.global.KEYCLOAK_MASTER_PASSWORD }}" + - name: SPRING_LDAP_USE + value: "{{ .Values.global.IMXC_LDAP_USE }}" + - name: TIMEZONE + value: Asia/Seoul + - name: IMXC_PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: IMXC_PROMETHEUS_NAMESPACE + value: "imxc" + - name: LOGGING_LEVEL_ROOT + value: info + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: 
IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + #R30020210730 추가 :: 현대카드는 true로 설정 + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-HOST + value: "exemmail1.ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PORT + value: "587" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-USERNAME + value: "imxc@ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PASSWORD + value: "1234" + - name: IMXC_ALERT_NOTIFICATION_MAIL_PROTOCOL + value: "smtp" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-REQ + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-ENB + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_SMTP-AUTH + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_DEBUG + value: "true" + - name: IMXC_ANOMALY_BLACK-LIST + value: "false" + - name: IMXC_VERSION_SAAS + value: "false" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_API_SERVER_KUBERNETES_SERVICE + value: info + - name: IMXC_WEBSOCKET_SCHEDULE_PERIOD_5SECOND + value: "30000" + - name: IMXC_CACHE_INFO_1MCACHE + value: "0 0/1 * * * ?" + - name: IMXC_EXECUTION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_PERMISSION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_CODE-LOG_USE + value: "false" + - name: IMXC_PORTAL_INFO_URL + value: "{{ .Values.global.IMXC_PORTAL_INFO_URL }}" + # Do not remove below rows related to AGENT-INSTALL. Added by youngmin 2021-03-29. 
+ - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_IP + value: {{ .Values.global.KAFKA_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_INTERFACE-PORT + value: "{{ .Values.global.KAFKA_INTERFACE_PORT }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_IP + value: {{ .Values.global.IMXC_API_SERVER_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_NETTY-PORT + value: "{{ .Values.global.APISERVER_NETTY_PORT }}" + - name: AGENT-INSTALL_REGISTRY_URL + value: {{ .Values.global.IMXC_IN_REGISTRY }} + - name: AGENT-INSTALL_IMAGE_TAG + value: {{ .Values.global.AGENT_IMAGE_TAG }} + - name: AGENT-INSTALL_JAEGER_AGENT_CLUSTERIP + value: {{ .Values.global.JAEGER_AGENT_CLUSTERIP }} + - name: AGENT-INSTALL_JAEGER_JAVA-SPECIALAGENT-CLASSPATH + value: {{ .Values.global.JAEGER_JAVA_SPECIALAGENT_CLASSPATH }} + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_IP + value: "{{ .Values.global.DATAGATE_OUTSIDE_IP }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_OUTSIDE_PORT }}" + - name: IMXC_REST-CONFIG_MAX-CON + value: "200" + - name: IMXC_REST-CONFIG_MAX-CON-ROUTE + value: "65" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + # Elasticsearch for Security + - name: SPRING_ELASTIC_SSL_USERNAME + value: "{{ .Values.global.CMOA_ES_ID }}" + - name: SPRING_ELASTIC_SSL_PASSWORD + value: "{{ .Values.global.CMOA_ES_PW }}" + - name: IMXC_BACK-LOGIN_ENABLED + value: "{{ .Values.global.BACKLOGIN }}" + volumeMounts: + - mountPath: /var/log/imxc-audit.log + name: auditlog + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + volumes: + - name: auditlog + hostPath: + path: {{ .Values.global.AUDITLOG_PATH }}/imxc-audit.log + type: FileOrCreate + - name: notification-upper-directory + hostPath: + path: /home/ + type: DirectoryOrCreate + - name: notification-directory + hostPath: + path: 
/home/cloudmoa_event.log + type: FileOrCreate diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-collector.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-collector.yaml new file mode 100644 index 0000000..dda6673 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/imxc-collector.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-collector + template: + metadata: + labels: + app: cmoa-collector + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: cmoa-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cmoa-collector:{{ .Values.global.COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 300m + memory: 1500Mi + limits: + cpu: 500m + memory: 2500Mi + ports: + - containerPort: 12010 + env: + - name: LOCATION + value: Asia/Seoul + - name: KAFKA_SERVER + value: kafka:9092 + - name: ELASTICSEARCH + value: elasticsearch:9200 +# - name: PROMETHEUS +# value: nginx-cortex/prometheus + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! 
+ - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: CMOA_ES_ID + value: {{ .Values.global.CMOA_ES_ID }} + - name: CMOA_ES_PW + value: {{ .Values.global.CMOA_ES_PW }} + resources: + requests: + cpu: "300m" + memory: "1500Mi" + limits: + cpu: "500m" + memory: "2500Mi" +- apiVersion: v1 + kind: Service + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + ports: + - name: cmoa-collector-exporter + port: 12010 + targetPort: 12010 + selector: + app: cmoa-collector + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/noti-server.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/noti-server.yaml new file mode 100644 index 0000000..8bafdcc --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/noti-server.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: noti-server + namespace: imxc +spec: + selector: + matchLabels: + app: noti + replicas: 1 + template: + metadata: + labels: + app: noti + spec: + containers: + - name: noti-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/notification-server:{{ .Values.global.NOTI_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-noti-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: {{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }} + - name: KEYCLOAK_REALM + value: exem + + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + + # postgres configuration + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + + # redis configuration + - name: SPRING_REDIS_HOST + value: redis-master + - name: SPRING_REDIS_PORT + value: "6379" + - name: SPRING_REDIS_PASSWORD + value: dkagh1234! 
+ + # elasticsearch configuration + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + + # file I/O configuration + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + + # rabbitmq configuration + - name: IMXC_RABBITMQ_HOST + value: base-rabbitmq + - name: IMXC_RABBITMQ_PORT + value: "61613" + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: IMXC_RABBITMQ_SYSTEM_ID + value: "user" + - name: IMXC_RABBITMQ_SYSTEM_PASSWORD + value: "eorbahrhkswp" + + # api-server configuration + - name: IMXC_API-SERVER-URL + value: "http://imxc-api-service:8080" + + # cortex integration + - name: SPRING_CORTEX_URLS + value: base-cortex-configs + - name: SPRING_CORTEX_PORT + value: "8080" + + # alert webhook + - name: IMXC_ALERT_WEBHOOK_URLS + value: http://noti-server-service:8080/alert + + # etc configuration + - name: IMXC_PROMETHEUS_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + - name: IMXC_ALERT_KUBERNETES_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "500Mi" + cpu: "150m" +--- +apiVersion: v1 +kind: Service +metadata: + name: noti-server-service + namespace: imxc +spec: + type: NodePort + selector: + app: noti + ports: + - protocol: TCP + port: 8080 + nodePort: 31083 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/streams-depl.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/streams-depl.yaml new file mode 100644 index 
0000000..b3223e5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/streams-depl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-stream-txntrend-deployment + namespace: imxc + labels: + app: kafka-stream-txntrend +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-stream-txntrend + template: + metadata: + labels: + app: kafka-stream-txntrend + spec: + containers: + - name: kafka-stream-txntrend + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-stream-txntrend:{{ .Values.global.KAFKA_STREAM_VERSION }} + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_KAFKA_HOST + value: kafka-broker:9094 + - name: SERVICE_STREAM_OUTPUT + value: jspd_txntrend diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/topology-agent.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/topology-agent.yaml new file mode 100644 index 0000000..6bcc783 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/topology-agent.yaml @@ -0,0 +1,107 @@ +{{ if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{ end }} +kind: ClusterRoleBinding +metadata: + name: topology-agent + namespace: imxc + labels: + k8s-app: topology-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: topology-agent + namespace: imxc +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: topology-agent + namespace: imxc + labels: + app: topology-agent +spec: + selector: + matchLabels: + app: topology-agent + template: + metadata: + labels: + app: topology-agent + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # below appended + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - 
name: topology-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/topology-agent:{{ .Values.global.TOPOLOGY_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + env: + - name: CLUSTER_ID + value: cloudmoa + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATAGATE + value: datagate:50051 + - name: LOG_RNAME_USE + value: "false" + - name: LOG_LEVEL + value: "DEBUG" + - name: CLOUDMOA_SETTING_PATH + value: /home/cloudmoa/setting/ + resources: + requests: + memory: "512Mi" + cpu: "200m" + limits: + memory: "600Mi" + cpu: "500m" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/zuul-server.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/zuul-server.yaml new file mode 100644 index 0000000..26305d0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/templates/zuul-server.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-deployment + namespace: imxc + labels: + app: cloud +spec: + selector: + matchLabels: + app: cloud + replicas: 1 + template: + metadata: + labels: + app: cloud + spec: + containers: + - env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ZIPKIN_BASE-URL + value: http://zipkin-service:9411 + - name: 
LOGGING_LEVEL_COM_EXEM_CLOUD_ZUULSERVER_FILTERS_AUTHFILTER + value: info + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + name: zuul + image: {{ .Values.global.IMXC_IN_REGISTRY }}/zuul-server:{{ .Values.global.ZUUL_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + #- containerPort: 6831 + #protocol: UDP + #resources: + # requests: + # memory: "256Mi" + # cpu: "344m" + # limits: + # memory: "1Gi" + # cpu: "700m" + resources: + requests: + memory: "500Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: zuul + namespace: imxc + labels: + app: cloud +spec: + type: NodePort + selector: + app: cloud + ports: + - port: 8080 + targetPort: 8080 + nodePort: 31081 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/values.yaml new file mode 100644 index 0000000..cdb0390 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/05-imxc/values.yaml @@ -0,0 +1,157 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + IMXC_LDAP_USE: false + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AUDITLOG_PATH: /var/log + KAFKA_IP: kafka-broker + # 로드밸런서 안 쓴다고 가정했을때 입니다.. 
+ KAFKA_INTERFACE_PORT: 9094 + APISERVER_NETTY_PORT: 10100 + #REGISTRY_URL: cdm-dev.exem-oss.org:5050 + #REGISTRY_URL: 10.10.31.243:5000/cmoa + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AGENT_IMAGE_TAG: rel3.4.8 + # Jaeger 관련변수 + JAEGER_AGENT_CLUSTERIP: 10.98.94.198 + JAEGER_JAVA_SPECIALAGENT_CLASSPATH: classpath:/install/opentracing-specialagent-1.7.4.jar + # added by DongWoo Kim 2021-06-21 + KEYCLOAK_AUTH_SERVER_URL: http://10.10.43.227:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_MASTER_USERNAME: admin + KEYCLOAK_MASTER_PASSWORD: admin + IMXC_PORTAL_INFO_URL: + KEYCLOAK_REALM: exem + # added by EunHye Kim 2021-08-25 + #DATAGATE_URLS: datagate + #DATAGATE_IP: 10.10.43.227 + #DATAGATE_PORT: 14268 + DATAGATE_INSIDE_IP: datagate + DATAGATE_INSIDE_PORT: 14268 + DATAGATE_OUTSIDE_IP: 10.10.43.227 + DATAGATE_OUTSIDE_PORT: 30051 + REDIS_URLS: redis-master + REDIS_PORT: 6379 + REDIS_PASSWORD: dkagh1234! + # added by DongWoo Kim 2021-08-31 (version of each module) + DATAGATE_VERSION: rel3.4.8 + #ADMIN_SERVER_VERSION: v1.0.0 + #API_SERVER_VERSION: CLOUD-172 + API_SERVER_VERSION: rel3.4.8 + COLLECTOR_VERSION: rel3.4.8 + #release-3.3.0 + TOPOLOGY_AGENT_VERSION: rel3.4.8 + METRIC_COLLECTOR_VERSION: rel3.4.8 + #v1.0.0 + METRIC_AGENT_VERSION: rel3.4.8 + # spring cloud + ZUUL_SERVER_VERSION: rel3.4.8 + #CMOA-1269 + EUREKA_SERVER_VERSION: rel3.4.8 + AUTH_SERVER_VERSION: rel3.4.8 + NOTI_SERVER_VERSION: rel3.4.8 + KAFKA_STREAM_VERSION: rel3.4.8 + CMOA_MANUAL_VERSION: rel3.4.8 + KUBE_INFO_FLAT_VERSION: rel3.4.8 + KUBE_INFO_BATCH_VERSION: rel3.4.8 + KUBE_INFO_CONNECTOR_VERSION: rel3.4.8 + + + CMOA_MANUAL_PORT: 31090 + + + # Keycloak + #KEYCLOAK_VERSION: v1.0.0 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + #IMXC_REGISTRY: 10.10.31.243:5000 + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + + # namespace 추가 + IMXC_NAMESPACE: imxc + + # ZUUL 8080으로 열어놓을것 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + JDBC_KIND: 'postgres' + JDBC_SERVER: 'postgres:5432' + 
JDBC_DB: 'postgresdb' + JDBC_USER: 'admin' + JDBC_PWD: 'eorbahrhkswp' + + KAFKA_INPUT_TOPIC: 'kubernetes_info' + + TABLE_PREFIX: 'cmoa_' + BLACK_LIST: 'configmap_base,cronjob_active,endpoint_base,endpoint_addresses,endpoint_notreadyaddresses,endpoint_ports,event_base,node_image,persistentvolume_base,persistentvolumeclaim_base,pod_volume,resourcequota_base,resourcequota_scopeselector' + DELETE_HOUR: '15' + BACKLOGIN: false diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh 
b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + 
"accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", 
+ "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": 
"manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + 
"clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": 
"8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + 
"otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + 
"standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + 
"surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + 
"authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + 
"defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": 
"fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + 
"id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add 
allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + 
"display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": 
"lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": 
"String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + 
{ + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + 
"xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + 
"saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + 
"requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + 
"autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml new file mode 100644 index 0000000..9fa97ed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config-jaeger + namespace: imxc +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + // Env Settings servletURL + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ 
.Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + demoServletURL: "{{ .Values.global.DEMO_SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings interMaxURL + interMaxURL: "http://{{ .Values.global.INTERMAX_IP }}:8080/intermax/?", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_UI_VERSION }}', + UI_build_ver: '{{ .Values.global.UI_SERVER_VERSION }}', + maxSelectionSize: 30, + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + healthIndicatorStateInfo: [ + { + state: "critical", + // max: 1.0, + // over: 0.8, + max: 100, + over: 80, + text: "Critical", + color: "#ff4040", + level: 4, + }, { + state: "warning", + // max: 0.8, + // over: 0.5, + max: 80, + over: 50, + text: "Warning", + color: "#ffa733", + level: 3, + }, { + state: "attention", + // max: 0.5, + // over: 0.0, + max: 50, + over: 0, + text: "Attention", + // color: "#B4B83D", + color: "#1cbe85", + level: 2, + }, { + state: "normal", + max: 0, + over: 0, + text: "Normal", + // color: "#64B87D", + color: "#24b0ed", + level: 1, + }, + ] + }; + + diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml new file mode 100644 index 0000000..a0d959f --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service-jaeger + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui-jaeger + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31084 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui-jaeger + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui-jaeger + template: + metadata: + labels: + app: imxc-ui-jaeger + spec: + containers: + - name: imxc-ui-jaeger + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config-jaeger + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config-jaeger diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/values.yaml new file mode 100644 index 0000000..54b3bcb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jaeger/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://10.10.43.227:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 10.10.43.227 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 10.10.43.227 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 10.10.43.227 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel3.4.8 + UI_SERVER_VERSION: rel3.4.8 + CMOA_MANUAL_VERSION: rel3.4.8 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! 
/bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' 
http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + 
"resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + 
"containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, 
+ { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + 
"webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": 
"**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": 
"authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", 
+ "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": 
"openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = 
Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile 
- JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + 
"jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + 
"claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": 
"openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": 
"openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + 
"protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + 
"saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + 
"allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + 
"priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + 
"description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": 
"basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + 
"requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": 
"6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": 
"registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { 
+ "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml 
b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml new file mode 100644 index 0000000..e47ff66 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config + namespace: imxc + +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + // Env Settings servletURL + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + // Env Settings interMaxURL + // ex) ~/intermax/?paConnect=1&paType=ResponseInspector&fromTime=1556096539206&toTime=1556096599206&serverName=jeus89 + interMaxURL: "", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_VERSION }}', + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + // refreshTime: '4', // 리로드 주기 설정 4로 설정시 새벽 4시에 리로드 하게 됨 + intervalTime: { // 5의 배수여야만 함 + short: 5, + medium: 10, + long: 60, + }, + // excludedContents: { + // anomalyScoreSettings: true, // entity black list setting page + // anomalyScoreInSidebar: true, // anomaly score in side bar + // }, + serviceTraceAgentType: 'jspd' + }; diff --git 
a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml new file mode 100644 index 0000000..35c4b61 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui + template: + metadata: + labels: + app: imxc-ui + spec: + containers: + - name: imxc-ui + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config diff --git a/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/values.yaml b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/values.yaml new file mode 100644 index 0000000..54b3bcb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/06-imxc-ui/imxc-ui-jspd/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://10.10.43.227:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 10.10.43.227 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 10.10.43.227 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 10.10.43.227 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel3.4.8 + UI_SERVER_VERSION: rel3.4.8 + CMOA_MANUAL_VERSION: rel3.4.8 diff --git a/ansible/01_old/roles/cmoa_install_bak/files/ip_change b/ansible/01_old/roles/cmoa_install_bak/files/ip_change new file mode 100755 index 0000000..ac13cc7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/ip_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_ip=$1 +after_ip=$2 +grep_path=$3 + +if [[ $before_ip == '' || $after_ip == '' ]]; then + echo '[Usage] $0 {before_ip} {after_ip}' + exit +fi + +grep -rn ${before_ip} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_ip}/${after_ip}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/k8s_status b/ansible/01_old/roles/cmoa_install_bak/files/k8s_status new file mode 100755 index 0000000..16b3c61 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/k8s_status @@ -0,0 +1,86 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, subprocess, io, time +from kubernetes import client, config +def debug_print(msg): + print(" # ", msg) + +def k8s_conn(KUBE_CONFIG_PATH): + config.load_kube_config( + config_file=KUBE_CONFIG_PATH + ) + k8s_api = client.CoreV1Api() + + return k8s_api + +def k8s_get_pod(k8s_api, namespace, target=''): + pretty=False + watch=False + timeout_seconds=30 + api_response = k8s_api.list_namespaced_pod(namespace, pretty=pretty, timeout_seconds=timeout_seconds, watch=watch) + pod_list=[] + for pod in api_response.items: + status = pod.status.phase + #container_status = pod.status.container_statuses[0] + #if container_status.started is False or container_status.ready is False: + # waiting_state = container_status.state.waiting + # if waiting_state.message is not None and 'Error' in waiting_state.message: + # status = waiting_state.reason + if target != '': + if target in pod.metadata.name: + return (pod.metadata.name + " " + status) + pod_list.append(pod.metadata.name+" "+status) + return pod_list + +def k8s_pod_status_check(k8s_api, waiting_time, namespace,except_pod=False): + num=0 + while True: + num+=1 + resp=k8s_get_pod(k8s_api, namespace) + all_run_flag=True + if debug_mode: + debug_print('-'*30) + debug_print('pod 상태 체크시도 : {} ({}s)'.format(num, waiting_time)) + debug_print('-'*30) + for i in resp: + if except_pod: + if except_pod in i.lower(): continue + if 'pending' in i.lower(): + all_run_flag=False + result='{} 결과: {}'.format(i, all_run_flag) + debug_print(result) + if all_run_flag: + if debug_mode: + debug_print('-'*30) + debug_print('[{}] pod All Running'.format(namespace)) + debug_print('-'*30) + for i in resp: debug_print(i) + break + else: time.sleep(int(waiting_time)) + +def main(): + namespace = os.sys.argv[1] + + try: + Except_k8s_pod = os.sys.argv[2] + except: + Except_k8s_pod = '' + + try: + KUBE_CONFIG_PATH = os.sys.argv[3] + os.environ["KUBECONFIG"]=KUBE_CONFIG_PATH + except: + KUBE_CONFIG_PATH = 
os.environ["KUBECONFIG"] + + k8s_api=k8s_conn(KUBE_CONFIG_PATH) + k8s_pod_status_check(k8s_api, 60, namespace, Except_k8s_pod) + + +if __name__ == "__main__": + try: + debug_mode=False + main() + except Exception as err: + print("[Usage] k8s_status {namespace} {Except_pod=(default=false)} {KUBECONFIG_PATH=(default=current env)}") + print(err) diff --git a/ansible/01_old/roles/cmoa_install_bak/files/postgres_check_data b/ansible/01_old/roles/cmoa_install_bak/files/postgres_check_data new file mode 100755 index 0000000..d377aeb --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/postgres_check_data @@ -0,0 +1,6 @@ +#!/bin/bash + +namespace=$1 +pg_pod=`kubectl -n ${namespace} get pod --no-headers | awk '{print $1}' | grep postgres` +kubectl_cmd="kubectl -n ${namespace} exec -it ${pg_pod} --" +${kubectl_cmd} bash -c "echo \"select count(*) from pg_database where datname='keycloak';\" | /usr/bin/psql -U postgres | egrep -iv '(count|---|row)' | tr -d ' ' | tr -d '\n'" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/files/rel_change b/ansible/01_old/roles/cmoa_install_bak/files/rel_change new file mode 100755 index 0000000..ae1f6b3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/files/rel_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_version=$1 +after_version=$2 +grep_path=$3 + +if [[ $before_version == '' || $after_version == '' ]]; then + echo '[Usage] $0 {before_version} {after_version}' + exit +fi + +grep -rn ${before_version} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_version}/${after_version}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-master.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-master.yml new file mode 100644 index 0000000..4a17c4a --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-master.yml @@ -0,0 +1,30 @@ +--- +- name: 1. Create a cmoa namespace + kubernetes.core.k8s: + name: "{{ cmoa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: 2. Create secret + kubernetes.core.k8s: + state: present + namespace: "{{ item }}" + src: "{{ role_path }}/files/00-default/secret_nexus.yaml" + apply: yes + with_items: + - "{{ cmoa_namespace }}" + - default + +- name: 3. kubeconfig check + shell: "echo $KUBECONFIG" + register: kubeconfig + +- name: 4. Patch default sa + shell: "{{ role_path }}/files/00-default/sa_patch.sh {{ kubeconfig.stdout }}" + +- name: 5. Master IP Setting + command: "{{ role_path }}/files/ip_change {{ before_ip }} {{ ansible_default_ipv4.address }} {{ role_path }}/files" + +- name: 6. CloudMOA Version Change + command: "{{ role_path }}/files/rel_change {{ before_version }} {{ cmoa_version }} {{ role_path }}/files" diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-node.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-node.yml new file mode 100644 index 0000000..a568b74 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/00-default-settings-node.yml @@ -0,0 +1,27 @@ +--- +- name: 1. Node add Label (worker1) + kubernetes.core.k8s: + apply: yes + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker1 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker1 + +- name: 2. 
Node add Label (worker2) + kubernetes.core.k8s: + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker2 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker2 \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/01-storage-install.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/01-storage-install.yml new file mode 100644 index 0000000..bef58ef --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/01-storage-install.yml @@ -0,0 +1,45 @@ +--- +- name: 1. yaml file install (sc, pv) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/01-storage/{{ item }}" + apply: yes + with_items: + - 00-storageclass.yaml + - 01-persistentvolume.yaml + +- name: 2. helmchart install (minio) + kubernetes.core.helm: + name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/01-storage/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/01-storage/{{item}}/values.yaml" + with_items: + - minio + +- name: 3. Change a Minio Api Service (NodePort=minio_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ minio_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ minio_service_port }}" + nodePort: "{{ minio_nodePort }}" + apply: yes + +- name: 4. Check Kubernetes Pods (minio) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 5. 
minio setting (minio) + command: "{{ role_path }}/files/01-storage/cmoa_minio {{ ansible_default_ipv4.address }}:{{ minio_nodePort }} {{ minio_user }} {{ bucket_name }} {{ days }} {{ rule_id }}" \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/02-base-install.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/02-base-install.yml new file mode 100644 index 0000000..f7924a6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/02-base-install.yml @@ -0,0 +1,51 @@ +--- +- name: 1. kafka broker config apply (base) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 00-kafka-broker-config.yaml + +- name: 2. coredns config apply (base) + kubernetes.core.k8s: + state: present + namespace: default + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 01-coredns.yaml + +- name: 3. helmchart install (base) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/02-base/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/02-base/{{item}}/values.yaml" + with_items: + - base + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/03-ddl-dml.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/03-ddl-dml.yml new file mode 100644 index 0000000..9c44f8e --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/03-ddl-dml.yml @@ -0,0 +1,64 @@ +- name: 1. Check Postgres DB Data + command: "{{ role_path }}/files/postgres_check_data {{ cmoa_namespace }}" + register: pg_check_result + +- name: 2. Insert Elasticsearch template + command: "sh {{ role_path }}/files/03-ddl-dml/elasticsearch/es-ddl-put.sh {{ cmoa_namespace }}" +# when: pg_check_result.stdout != '1' +# register: es + +#- debug: +# msg: "{{es.stdout_lines}}" + +- name: 2.1. Elasticsearch dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy alertmanager base-cortex-configs base-cortex-distributor base-cortex-ruler" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + +- name: 2.2. Check Kubernetes Pods (Elasticsearch dependency) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 3. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" + register: pod_list + when: pg_check_result.stdout != '1' + +- name: 4. 
Copy psql file in postgres (DDL) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_ddl.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" + when: item is match('postgres') and pg_check_result.stdout != '1' + with_items: "{{ pod_list.stdout_lines }}" + ignore_errors: true + +- name: 5. Execute a command in postgres (DDL) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 6. Copy psql file in postgres (DML) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_dml.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 7. Execute a command in postgres (DML) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/04-keycloak-install.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/04-keycloak-install.yml new file mode 100644 index 0000000..de5fc9c --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/04-keycloak-install.yml @@ -0,0 +1,34 @@ +--- +- name: 1. 
helmchart install (keycloak) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/04-keycloak" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/04-keycloak/values.yaml" + with_items: + - keycloak + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + + +- name: 5. Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/05-imxc-install.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/05-imxc-install.yml new file mode 100644 index 0000000..420d2d1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/05-imxc-install.yml @@ -0,0 +1,16 @@ +--- +- name: 1. helmchart install (imxc) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/05-imxc" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/05-imxc/values.yaml" + with_items: + - imxc + +- name: 2. Check Kubernetes Pods (imxc / keycloak) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/06-imxc-ui-install.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/06-imxc-ui-install.yml new file mode 100644 index 0000000..7da82a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/06-imxc-ui-install.yml @@ -0,0 +1,112 @@ +--- +- name: 1. 
helmchart install (imxc-ui-all) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + - imxc-ui-jspd + when: imxc_ui == 'all' + +- name: 1. helmchart install (imxc-ui-jaeger) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + when: imxc_ui == 'jaeger' + +- name: 2. Change a imxc-ui Service (imxc-ui-jaeger) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ jaeger_servicename }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ jaeger_service_port }}" + nodePort: "{{ jaeger_nodePort }}" + apply: yes + when: imxc_ui == 'jaeger' + +- name: 2. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" # Output is a column + register: pod_list + when: imxc_ui != 'all' + +- name: 3. Copy psql file in psql (imxc-jaeger) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jaeger_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 4. 
Execute a command in psql (imxc-jaeger) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 1. helmchart install (imxc-ui-jspd) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jspd + when: imxc_ui == 'jspd' + ignore_errors: true + +- name: 3. Copy psql file in postgres (imxc-ui-jspd) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jspd_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 4. Execute a command in postgres (imxc-ui-jspd) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 2. 
Check Kubernetes Pods (imxc ui) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/07-keycloak-setting.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/07-keycloak-setting.yml new file mode 100644 index 0000000..f800f87 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/07-keycloak-setting.yml @@ -0,0 +1,76 @@ +--- +- name: 0. Generate keycloak auth token + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/realms/master/protocol/openid-connect/token" + method: POST + body: "client_id={{ keycloak_auth_client }}&username={{ keycloak_admin_user }}&password={{ keycloak_admin_password }}&grant_type=password" + validate_certs: no + #no_log: "{{ keycloak_no_log | default('True') }}" + register: keycloak_auth_response + until: keycloak_auth_response.status == 200 + retries: 5 + delay: 2 + +- name: 1. Determine if realm exists + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/admin/realms/{{ keycloak_realm }}" + method: GET + status_code: + - 200 + - 404 + headers: + Accept: "application/json" + Authorization: "Bearer {{ keycloak_auth_response.json.access_token }}" + register: keycloak_realm_exists + +- name: 2. Validate Keycloak clients + ansible.builtin.assert: + that: + - item.name is defined and item.name | length > 0 + - (item.client_id is defined and item.client_id | length > 0) or (item.id is defined and item.id | length > 0) + fail_msg: "For each keycloak client, attributes `name` and either `id` or `client_id` is required" + quiet: True + loop: "{{ keycloak_clients | flatten }}" + loop_control: + label: "{{ item.name | default('unnamed client') }}" + +- name: 3. 
update a Keycloak client + community.general.keycloak_client: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + default_roles: "{{ item.roles | default(omit) }}" + client_id: "{{ item.client_id | default(omit) }}" + id: "{{ item.id | default(omit) }}" + name: "{{ item.name | default(omit) }}" + description: "{{ item.description | default(omit) }}" + root_url: "{{ item.root_url | default('') }}" + admin_url: "{{ item.admin_url | default('') }}" + base_url: "{{ item.base_url | default('') }}" + enabled: "{{ item.enabled | default(True) }}" + redirect_uris: "{{ item.redirect_uris | default(omit) }}" + web_origins: "{{ item.web_origins | default('+') }}" + bearer_only: "{{ item.bearer_only | default(omit) }}" + standard_flow_enabled: "{{ item.standard_flow_enabled | default(omit) }}" + implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(omit) }}" + direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(omit) }}" + service_accounts_enabled: "{{ item.service_accounts_enabled | default(omit) }}" + public_client: "{{ item.public_client | default(False) }}" + protocol: "{{ item.protocol | default(omit) }}" + state: present + #no_log: "{{ keycloak_no_log | default('True') }}" + register: create_client_result + loop: "{{ keycloak_clients | flatten }}" + when: (item.name is defined and item.client_id is defined) or (item.name is defined and item.id is defined) + +- name: 4. 
Dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy imxc-api noti-server auth-server zuul-deployment" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + + diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/08-finish.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/08-finish.yml new file mode 100644 index 0000000..f06cc24 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/08-finish.yml @@ -0,0 +1,92 @@ +--- +- name: 0. Check Kubernetes Pods (ALL) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 1. IP Setting reset + command: "{{ role_path }}/files/ip_change {{ansible_default_ipv4.address}} {{before_ip}} {{ role_path }}/files" + +- name: 2. CloudMOA Version reset + command: "{{ role_path }}/files/rel_change {{ cmoa_version }} {{ before_version }} {{ role_path }}/files" + +- debug: + msg: + - ======================================================================================= + - "## Keycloak WEB" + - keycloak URL = http://{{ ansible_default_ipv4.address }}:31082 + - --------------------------------------------------------------------------------------- + - "## Keycloak Login Theme Setting" + - "## WEB > Realm Settings > Themes > Login Theme" + - " > CloudMOA_V2" + - --------------------------------------------------------------------------------------- + - "## CloudMOA WEB " + - CloudMOA Jaeger = http://{{ ansible_default_ipv4.address }}:31080 + - CloudMOA JSPD = http://{{ ansible_default_ipv4.address }}:31084 + - ======================================================================================= + +#- name: Node add Label (worker1) +# shell: kubectl get node "{{ item }}" --show-labels +# register: worker1 +# with_items: +# - "{{ ansible_hostname }}" +# #when: ansible_hostname in groups.worker1 +# +#- name: Node add Label (worker2) +# shell: kubectl get node "{{ item }}" --show-labels +# register: worker2 +# with_items: +# - "{{ ansible_hostname }}" +# 
#when: ansible_hostname in groups.worker2 +# +# +#- name: debug +# debug: +# msg: "{{item}}" +# with_items: +# - "{{ worker1.stdout }}" +# - "{{ worker2.stdout }}" + +#- name: Iterate over pod names and delete the filtered ones +# #debug: +# # msg: "{{ item }}" +# kubernetes.core.k8s_cp: +# namespace: imxc +# pod: "{{ item }}" +# remote_path: /tmp/postgres_insert_ddl.psql +# local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') + +#- name: Execute a command +# kubernetes.core.k8s_exec: +# namespace: imxc +# pod: "{{ item }}" +# command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# +#- name: Iterate over pod names and delete the filtered ones +# #debug: +# # msg: "{{ item }}" +# kubernetes.core.k8s_cp: +# namespace: imxc +# pod: "{{ item }}" +# remote_path: /tmp/postgres_insert_dml.psql +# local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# +#- name: Execute a command +# kubernetes.core.k8s_exec: +# namespace: imxc +# pod: "{{ item }}" +# command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# register: test +# +#- name: test +# debug: +# msg: "{{ test.stdout }}" +##- set_fact: +## postgres_pod: "{{ postgres_pod2.stdout_lines is match('postgres') | default(postgres_pod2) }}" +# \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/helm-install.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_install_bak/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/01_old/roles/cmoa_install_bak/tasks/main.yml b/ansible/01_old/roles/cmoa_install_bak/tasks/main.yml new file mode 100644 index 0000000..7239fa3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- include: helm-install.yml + tags: helm-install + +- include: 00-default-settings-master.yml + tags: default_setting + when: kubernetes_role == 'master' + +- include: 00-default-settings-node.yml + tags: default_setting_node + when: kubernetes_role == 'node' + +- include: 01-storage-install.yml + tags: storage-install + when: kubernetes_role == 'master' + +- include: 02-base-install.yml + tags: base-install + when: kubernetes_role == 'master' + +- include: 
03-ddl-dml.yml + tags: ddl-dml + when: kubernetes_role == 'master' + +- include: 04-keycloak-install.yml + tags: keycloak-install + when: kubernetes_role == 'master' + +- include: 05-imxc-install.yml + tags: imxc-install + when: kubernetes_role == 'master' + +- include: 06-imxc-ui-install.yml + tags: imxc-ui-install + when: kubernetes_role == 'master' + +- include: 07-keycloak-setting.yml + tags: keycloak-setting + when: kubernetes_role == 'master' + +- include: 08-finish.yml + tags: finish + when: kubernetes_role == 'master' \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_install_bak/templates/realm.json.j2 b/ansible/01_old/roles/cmoa_install_bak/templates/realm.json.j2 new file mode 100644 index 0000000..1323ce2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/templates/realm.json.j2 @@ -0,0 +1,7 @@ +{ + "id": "{{ keycloak_realm }}", + "realm": "{{ keycloak_realm }}", + "enabled": true, + "eventsEnabled": true, + "eventsExpiration": 7200 +} diff --git a/ansible/01_old/roles/cmoa_install_bak/vars/main.yml b/ansible/01_old/roles/cmoa_install_bak/vars/main.yml new file mode 100644 index 0000000..14c8e95 --- /dev/null +++ b/ansible/01_old/roles/cmoa_install_bak/vars/main.yml @@ -0,0 +1,7 @@ +--- +# name of the realm to create, this is a required variable +keycloak_realm: Exem + +# other settings +keycloak_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_http_port }}" +keycloak_management_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_management_http_port }}" diff --git a/ansible/01_old/roles/cmoa_os_setting/README.md b/ansible/01_old/roles/cmoa_os_setting/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/01_old/roles/cmoa_os_setting/defaults/main.yml b/ansible/01_old/roles/cmoa_os_setting/defaults/main.yml new file mode 100644 index 0000000..55b8a06 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/defaults/main.yml @@ -0,0 +1,140 @@ +helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f +helm_version: v3.10.2 + +kubernetes_version: 1.25.2 + +kubernetes_kubelet_extra_args: "" +kubernetes_kubeadm_init_extra_opts: "" +kubernetes_join_command_extra_opts: "" + +kubernetes_pod_network: + cni: 'calico' + cidr: '10.96.0.0/12' + +kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml + +kubernetes_metric_server_file: https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + +containerd_config: + version: 2 + root: /var/lib/containerd + state: /run/containerd + plugin_dir: "" + disabled_plugins: [] + required_plugins: [] + oom_score: 0 + grpc: + address: /run/containerd/containerd.sock + tcp_address: "" + tcp_tls_cert: "" + tcp_tls_key: "" + uid: 0 + gid: 0 + max_recv_message_size: 16777216 + max_send_message_size: 16777216 + ttrpc: + address: "" + uid: 0 + gid: 0 + debug: + address: "" + uid: 0 + gid: 0 + level: "" + metrics: + address: "" + grpc_histogram: false + cgroup: + path: "" + timeouts: + "io.containerd.timeout.shim.cleanup": 5s + "io.containerd.timeout.shim.load": 5s + "io.containerd.timeout.shim.shutdown": 3s + "io.containerd.timeout.task.state": 2s + plugins: + "io.containerd.gc.v1.scheduler": + pause_threshold: 0.02 + deletion_threshold: 0 + mutation_threshold: 100 + schedule_delay: 0s + startup_delay: 100ms + "io.containerd.grpc.v1.cri": + disable_tcp_service: true + stream_server_address: 127.0.0.1 + stream_server_port: "0" + stream_idle_timeout: 4h0m0s + enable_selinux: false + sandbox_image: k8s.gcr.io/pause:3.1 + stats_collect_period: 10 + systemd_cgroup: false + enable_tls_streaming: false + max_container_log_line_size: 16384 + 
disable_cgroup: false + disable_apparmor: false + restrict_oom_score_adj: false + max_concurrent_downloads: 3 + disable_proc_mount: false + containerd: + snapshotter: overlayfs + default_runtime_name: runc + no_pivot: false + default_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + untrusted_workload_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + runtimes: + runc: + runtime_type: io.containerd.runc.v1 + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + cni: + bin_dir: /opt/cni/bin + conf_dir: /etc/cni/net.d + max_conf_num: 1 + conf_template: "" + registry: + configs: + "10.10.31.243:5000": + tls: + insecure_skip_verify: true + mirrors: + "docker.io": + endpoint: + - https://registry-1.docker.io + "10.10.31.243:5000": + endpoint: + - http://10.10.31.243:5000 + x509_key_pair_streaming: + tls_cert_file: "" + tls_key_file: "" + "io.containerd.internal.v1.opt": + path: /opt/containerd + "io.containerd.internal.v1.restart": + interval: 10s + "io.containerd.metadata.v1.bolt": + content_sharing_policy: shared + "io.containerd.monitor.v1.cgroups": + no_prometheus: false + "io.containerd.runtime.v1.linux": + shim: containerd-shim + runtime: runc + runtime_root: "" + no_shim: false + shim_debug: false + "io.containerd.runtime.v2.task": + platforms: + - linux/amd64 + "io.containerd.service.v1.diff-service": + default: + - walking + "io.containerd.snapshotter.v1.devmapper": + root_path: "" + pool_name: "" + base_image_size: "" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/.helmignore b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md new file mode 100644 index 0000000..27a52e8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md @@ -0,0 +1,445 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). + +### 4.2.1 + +- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1 +- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + +### 4.2.0 + +- Support for Kubernetes v1.19.0 was removed +- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0" +- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name" +- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1" +- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16" +- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process" +- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix" +- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check" +- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16" +- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump 
github.com/stretchr/testify from 1.7.5 to 1.8.0" +- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process" +- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable" +- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15" +- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2" +- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format" +- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API" +- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide" +- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs" +- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14" +- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0" +- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5" +- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement" +- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver" +- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step" +- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha" +- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path" +- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases" +- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds" +- 
"[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps" +- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)" +- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard" +- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0" +- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2" +- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos" +- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it" +- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0" +- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0" +- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3" +- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1" + +### 4.1.2 + +- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed" +- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter" +- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart" + +### 4.1.0 + +- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script" +- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6" +- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod" +- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for 
redirectScheme with use-forward-headers when X-Forwarded-Proto is empty" +- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector" +- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies" +- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md" +- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing" +- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist" +- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one" +- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement" +- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation" +- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0" + +### 4.0.18 + +- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." 
+- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +- 
"[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations" +- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1 +- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6 +- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs +- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors +- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release +- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch +- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart +- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump 
google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation +- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045) +- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues +- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543 +- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name +- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners +- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option +- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags +- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page +- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation +- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations +- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs +- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml +- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide +- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile +- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use 
k8s-staging-test-infra/gcb-docker-gcloud +- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement +- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation. +- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs +- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server +- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog +- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition + +### 4.0.14 + +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. 
+ +### 4.0.10 + +- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0 + +### 4.0.9 + +- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources + +### 4.0.7 + +- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx +- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx +- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode +- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1 + +### 3.32.0 + +- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA + +### 3.31.0 + +- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] 
[#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + +### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. 
+ +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix 
controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 + +### 3.3.1 + +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update 
chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls 
because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts* checking the tag diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml new file mode 100644 index 0000000..55c0b54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + artifacthub.io/changes: | + - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + - "fix permissions about configmap" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.3.1 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.20.0-0' +maintainers: +- name: rikatz +- name: strongjz +- name: tao12345666333 +name: ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 4.2.5 diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/OWNERS b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md new file mode 100644 
index 0000000..4e6a696 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md @@ -0,0 +1,494 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square) + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). 
As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. 
"release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +## Requirements + +Kubernetes: `>=1.20.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | +| 
controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | | +| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | +| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| 
controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. 
| +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. | +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. 
| +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. | +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.chroot | bool | `false` | | +| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | | +| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"registry.k8s.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.3.1"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. 
Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds 
from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # | +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| 
controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| +| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. 
An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
# Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"registry.k8s.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security 
Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. 
This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param | +| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| podSecurityPolicy.enabled | bool | `false` | | +| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration | +| rbac.create | bool | `true` | | +| rbac.scope | bool | `false` | | +| revisionHistoryLimit | int | `10` | Rollback limit # | +| serviceAccount.annotations | object | `{}` | Annotations for the controller service account | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | +| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | + diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl new file mode 100644 index 0000000..8959961 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl @@ -0,0 +1,235 @@ +{{ template "chart.header" . }} +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . 
}} + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. 
For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. 
+ +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . 
}} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..4393a5b --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,14 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..1d94be2 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,22 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..f299dbf --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ab7d47b --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..0a200a7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml 
b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..3b7aa2f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..0b55306 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,17 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..acd86a7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,20 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..90b0f57 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..25ee64d --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..380c8b4 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..82fa23e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..cb3cb54 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..8026a63 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..fccdb13 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..54d364d --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..b8b3ac6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: 
null + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..1749418 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..a564eaf --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,20 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml new file mode 100644 index 0000000..9f46b4e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml @@ -0,0 +1,8 @@ +# Left blank to test default values +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP diff --git 
a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..ec59235 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..17a11ac --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml new file mode 100644 index 0000000..fd8df8d --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml 
b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9209ad5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..cd9b323 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b48d93c --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml @@ -0,0 +1,16 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..2f332a7 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..c51a4e9 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,19 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..56323c5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,17 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..5b45b69 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,15 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..ac0b6e6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..6195bb3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml new file mode 100644 index 0000000..95487b0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml @@ -0,0 +1,12 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + extraEnvs: + - name: FOO + 
value: foo + - name: TEST + value: test + patch: + enabled: true diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..76669a5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml new file mode 100644 index 0000000..e190f03 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + + service: + type: LoadBalancer + nodePorts: + http: "30000" + https: "30001" + tcp: {} + udp: {} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml new file mode 100644 index 0000000..2b28787 --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml @@ -0,0 +1,724 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - 
apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of 
the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx 
+ app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: 
"registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We 
don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + 
helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: 
ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml new file mode 100644 index 0000000..9ef52fc --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml @@ -0,0 +1,725 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: 
true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + 
app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - 
leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: 
LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 30000 + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 30001 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key 
+ securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# 
https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + 
helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + 
- secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: 
"registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - 
--webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt new file mode 100644 index 0000000..8985c56 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt @@ -0,0 +1,80 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." 
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + +{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}} + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: example + namespace: foo + {{- if eq $isV1 false }} + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + {{- end }} + spec: + {{- if $isV1 }} + ingressClassName: {{ .Values.controller.ingressClassResource.name }} + {{- end }} + rules: + - host: www.example.com + http: + paths: + - pathType: Prefix + backend: + service: + name: exampleService + port: + number: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### 
WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext. 
+*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. +*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . 
}}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} 
+{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . 
}}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..7558e0b --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..0528215 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml new file mode 100644 index 0000000..0e725ec --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) 
}} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml new file mode 100644 index 0000000..80c268f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: {{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + 
{{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml new file mode 100644 index 0000000..5ad1867 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml @@ -0,0 +1,228 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml 
.Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml new file mode 100644 index 0000000..9492784 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml @@ -0,0 +1,21 @@ +{{- if .Values.controller.ingressClassResource.enabled -}} +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ .Values.controller.ingressClassResource.name }} +{{- if .Values.controller.ingressClassResource.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +{{- end }} +spec: + controller: {{ .Values.controller.ingressClassResource.controllerValue }} + {{ template "ingressClass.parameters" . }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml new file mode 100644 index 0000000..875157e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +# https://keda.sh/docs/ + +apiVersion: {{ .Values.controller.keda.apiVersion }} +kind: ScaledObject +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.keda.scaledObject.annotations }} + annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: +{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }} + deploymentName: {{ include "ingress-nginx.controller.fullname" . }} +{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} +{{- end }} + pollingInterval: {{ .Values.controller.keda.pollingInterval }} + cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} + minReplicaCount: {{ .Values.controller.keda.minReplicas }} + maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} + triggers: +{{- with .Values.controller.keda.triggers }} +{{ toYaml . | indent 2 }} +{{ end }} + advanced: + restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }} +{{- if .Values.controller.keda.behavior }} + horizontalPodAutoscalerConfig: + behavior: +{{ with .Values.controller.keda.behavior -}} +{{ toYaml . | indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..78b5362 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . 
}} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml new file mode 100644 index 0000000..2e0499c --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml @@ -0,0 +1,94 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml new file mode 100644 index 0000000..330be8c --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml @@ -0,0 +1,113 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.controller.electionID }} + 
verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if 
$.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..1c1d5bd --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http-metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: http-metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + selector: 
+ {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + 
appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..824b2a1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..973d36b --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{- else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . }} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml new file mode 100644 index 0000000..f74c2fb --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }} + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow + namespace: {{ .Release.Namespace }} +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} + policyTypes: + - Ingress + +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . 
}}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml new file mode 100644 index 0000000..c144c8f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml @@ -0,0 +1,38 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . 
}}-backend] + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml new file mode 100644 index 0000000..12e7a4f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml @@ -0,0 +1,10 @@ +{{- with .Values.dhParam -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ingress-nginx.controller.fullname" $ }} + labels: + {{- include "ingress-nginx.labels" $ | nindent 4 }} +data: + dhparam.pem: {{ . }} +{{- end }} diff --git a/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/values.yaml b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/values.yaml new file mode 100644 index 0000000..9ec174f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/files/ingress-nginx/values.yaml @@ -0,0 +1,944 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + image: + ## Keep false as default for now! 
+ chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.3.1" + digest: sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974 + digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1 + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + # -- Configures the controller container name + containerName: controller + + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + + # -- Optionally customize the pod hostname. + hostname: {} + + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: true + + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + + # -- Election ID to use for status update + electionID: ingress-controller-leader + + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + + # -- For backwards compatibility with ingress.class annotation, use ingressClass. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security Context policies for controller pods + podSecurityContext: {} + + # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. 
+ publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. + namespaceSelector: "" + + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # -- Additional command line arguments to pass to nginx-ingress-controller + # E.g. 
to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod 
anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 
10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. + ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + 
minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were + # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. 
+ # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. 
This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. 
+ extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available 
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. + ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # 
annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the Configmap feature + # worker-shutdown-timeout new value is 240s instead of 10s. + ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 
5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A 
base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/ansible/01_old/roles/cmoa_os_setting/handlers/main.yml b/ansible/01_old/roles/cmoa_os_setting/handlers/main.yml new file mode 100644 index 0000000..4bf601f --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Reload systemd configuration + service: + daemon_reload: True + +- name: Restart containerd service + service: + name: containerd + enabled: true + state: restarted diff --git a/ansible/01_old/roles/cmoa_os_setting/meta/main.yml b/ansible/01_old/roles/cmoa_os_setting/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/00-centos-os-main.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/00-centos-os-main.yml new file mode 100644 index 0000000..9b831b8 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/00-centos-os-main.yml @@ -0,0 +1,82 @@ +--- +- name: Update and upgrade yum packages + yum: + name: "*" + state: latest + +- name: Install yum packages + yum: + name: ['cloud-utils', 'ca-certificates', 'socat', 'conntrack', 'gnupg', 'bash-completion'] + state: present + +- name: Disable firewalld + systemd: name=firewalld state=stopped + ignore_errors: yes + tags: + - install + - atomic + - firewalld + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + + # - name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + # replace: + # path: /etc/fstab + # regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + # replace: '# \1' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + become: true + lineinfile: + path: /etc/fstab + regexp: '^/dev/mapper/.*swap' + line: '# {{ item }}' + # when: item is search('^/dev/mapper/.*swap') + loop: "{{ lookup('file', '/etc/fstab').split('\n') 
}}" + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Disable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Disable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml new file mode 100644 index 0000000..8c460d5 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml @@ -0,0 +1,71 @@ +--- +- name: Update and upgrade apt packages + apt: + upgrade: yes + update_cache: yes + force_apt_get: yes + cache_valid_time: 86400 + +- name: Install apt packages + apt: + name: ['cloud-utils', 'apt-transport-https', 'ca-certificates', 'curl', 'socat', 'conntrack', 'gnupg', 'lsb-release', 'bash-completion', 'chrony'] + state: present + +- name: Disable ufw + command: 'ufw disable' + when: ansible_distribution_version == '20.04' + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Add br_netfilter to 
module autoload + lineinfile: + path: /etc/modules-load.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Load kernel modules + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Persist kubernetes sysctl settings + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Enable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Enable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml new file mode 100644 index 0000000..35a0cb6 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml @@ -0,0 +1,45 @@ +--- +- name: Add containerd yum repository + command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + yum: + name: ['containerd'] + state: present + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + 
description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes \ No newline at end of file diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml new file mode 100644 index 0000000..556485e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml @@ -0,0 +1,78 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: containerd_apt_repo_task + +- name: apt list --upgradable + command: apt list --upgradable + when: containerd_apt_repo_task.changed + +- name: apt update + apt: + update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add 
kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/02-k8s-main.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/02-k8s-main.yml new file mode 100644 index 0000000..ede7119 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/02-k8s-main.yml @@ -0,0 +1,45 @@ +--- +- name: Enable kubelet service + systemd: + name: kubelet + enabled: true + masked: false + +- name: Check if Kubernetes has already been initialized. + stat: + path: /etc/kubernetes/admin.conf + register: kubernetes_init_stat + +# Set up master. +- include_tasks: 03-k8s-master.yml + when: kubernetes_role == 'master' + +# Set up nodes. +- name: Get the kubeadm join command from the Kubernetes master. + command: kubeadm token create --print-join-command + changed_when: false + when: kubernetes_role == 'master' + register: kubernetes_join_command_result + +- name: Get kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + flat: yes + when: kubernetes_role == 'master' + +- name: Set the kubeadm join command globally. 
+ set_fact: + kubernetes_join_command: > + {{ kubernetes_join_command_result.stdout }} + {{ kubernetes_join_command_extra_opts }} + when: kubernetes_join_command_result.stdout is defined + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + +- include_tasks: 05-k8s-node.yml + when: kubernetes_role == 'node' + +- include_tasks: 06-worker-directory.yml + when: kubernetes_role == 'node' diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/03-k8s-master.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/03-k8s-master.yml new file mode 100644 index 0000000..954cdbb --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/03-k8s-master.yml @@ -0,0 +1,45 @@ +--- +- name: Initialize Kubernetes master with kubeadm init. + command: > + kubeadm init + --pod-network-cidr={{ kubernetes_pod_network.cidr }} + --apiserver-advertise-address={{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }} + {{ kubernetes_kubeadm_init_extra_opts }} + register: kubeadmin_init + when: not kubernetes_init_stat.stat.exists + +- name: Print the init output to screen. + debug: + var: kubeadmin_init.stdout + verbosity: 2 + when: not kubernetes_init_stat.stat.exists + +- name: Ensure .kube directory exists. + file: + path: ~/.kube + state: directory + +- name: Symlink the kubectl admin.conf to ~/.kube/conf. 
+ file: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/config + state: link + force: yes + +- name: copy the kubectl config to ~/.kube/ansible_config + copy: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + remote_src: true + +- name: Configure Calico networking and Metric Server + include_tasks: 04-k8s-master-yaml.yml + +- name: Kubectl Cheat Sheet + lineinfile: + path: ~/.bashrc + line: "{{ item }}" + with_items: + - source <(kubectl completion bash) + - alias k=kubectl + - complete -o default -F __start_kubectl k diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml new file mode 100644 index 0000000..996a122 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml @@ -0,0 +1,15 @@ +--- +- name: Configure Calico networking. + command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_calico_manifest_file }} + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' + +- name: Configure Metric Server + command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_metric_server_file }} + register: metric_server_result + changed_when: "'created' in metric_server_result.stdout" diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/05-k8s-node.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/05-k8s-node.yml new file mode 100644 index 0000000..304cbf1 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/05-k8s-node.yml @@ -0,0 +1,6 @@ +--- +- name: Join node to Kubernetes master + shell: > + {{ kubernetes_join_command }} + creates=/etc/kubernetes/kubelet.conf + tags: ['skip_ansible_lint'] diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/06-worker-directory.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/06-worker-directory.yml new file mode 100644 index 0000000..5b14eab --- /dev/null +++ 
b/ansible/01_old/roles/cmoa_os_setting/tasks/06-worker-directory.yml @@ -0,0 +1,43 @@ +--- +- name: make worker1 directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: u+rwx,g+rwx,o+rwx + recurse: yes + owner: root + group: root + with_items: + - /media/data/minio/pv1 + - /media/data/minio/pv2 + - /media/data/postgres/postgres-data-0 + - /media/data/elasticsearch/elasticsearch-data-0 + - /media/data/zookeeper/zookeeper-data-0 + - /media/data/kafka/kafka-data-0 + - /media/cloudmoa/ingester/ingester-data-1 + - /media/data/redis/redis-data-0 + - /media/data/redis/redis-data-1 + - /media/data/rabbitmq + when: inventory_hostname in groups["worker1"] + +- name: make worker2 directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: u+rwx,g+rwx,o+rwx + recurse: yes + owner: root + group: root + with_items: + - /media/data/minio/pv3 + - /media/data/minio/pv4 + - /media/data/elasticsearch/elasticsearch-data-1 + - /media/data/zookeeper/zookeeper-data-1 + - /media/data/zookeeper/zookeeper-data-2 + - /media/data/kafka/kafka-data-1 + - /media/data/kafka/kafka-data-2 + - /media/cloudmoa/ingester/ingester-data-2 + - /media/cloudmoa/ingester/ingester-data-3 + - /media/data/redis/redis-data-1 + - /media/data/redis/redis-data-2 + when: inventory_hostname in groups["worker2"] diff --git a/ansible/01_old/roles/cmoa_os_setting/tasks/main.yml b/ansible/01_old/roles/cmoa_os_setting/tasks/main.yml new file mode 100644 index 0000000..d73559e --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- include: 00-centos-os-main.yml + tags: centos + when: ansible_distribution == 'CentOS' + +- include: 00-ubuntu-os-main.yml + tags: ubuntu + when: ansible_distribution == 'Ubuntu' + +- include: 01-centos-os-runtime.yml + tags: centos + when: ansible_distribution == 'CentOS' + +- include: 01-ubuntu-os-runtime.yml + tags: ubuntu + when: ansible_distribution == 'Ubuntu' + +- include: 02-k8s-main.yml + tags: 
k8s-main diff --git a/ansible/01_old/roles/cmoa_os_setting/templates/config.toml.j2 b/ansible/01_old/roles/cmoa_os_setting/templates/config.toml.j2 new file mode 100644 index 0000000..0217565 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/templates/config.toml.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% from 'yaml2toml_macro.j2' import yaml2toml with context -%} + +{{ yaml2toml(containerd_config) }} diff --git a/ansible/01_old/roles/cmoa_os_setting/templates/hosts.j2 b/ansible/01_old/roles/cmoa_os_setting/templates/hosts.j2 new file mode 100644 index 0000000..18804b7 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/templates/hosts.j2 @@ -0,0 +1,6 @@ +127.0.0.1 localhost +:: 1 localhost + +{% for host in groups.all %} +{{ hostvars[host].ansible_default_ipv4.address }} {{ hostvars[host].ansible_fqdn }} {{ hostvars[host].ansible_hostname }} +{%endfor%} diff --git a/ansible/01_old/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 b/ansible/01_old/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 new file mode 100644 index 0000000..33f69d0 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 @@ -0,0 +1,58 @@ +{%- macro yaml2inline_toml(item, depth) -%} + {%- if item is string or item is number -%} + {#- First, process all primitive types. -#} + {{ item | to_json }} + {%- elif item is mapping -%} + {#- Second, process all mappings. -#} + {#- Note that inline mappings must not contain newlines (except inside contained lists). -#} + {{ "{" }} + {%- for key, value in item.items() | sort -%} + {{ " " + + (key | to_json) + + " = " + + yaml2inline_toml(value, depth) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ " }" }} + {%- else -%} + {#- Third, process all lists. 
-#} + {%- if item | length == 0 -%}{{ "[]" }}{%- else -%} + {{ "[" }} + {%- for entry in item -%} + {{ "\n" + + (" " * (depth + 1)) + + yaml2inline_toml(entry, depth + 1) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ "\n" + (" " * depth) + "]" }} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro yaml2toml(item, super_keys=[]) -%} + {%- for key, value in item.items() | sort -%} + {%- if value is not mapping -%} + {#- First, process all non-mappings. -#} + {{ (" " * (super_keys | length)) + + (key | to_json) + + " = " + + (yaml2inline_toml(value, super_keys | length)) + + "\n" + }} + {%- endif -%} + {%- endfor -%} + {%- for key, value in item.items() | sort -%} + {%- if value is mapping -%} + {#- Second, process all mappings. -#} + {{ "\n" + + (" " * (super_keys | length)) + + "[" + + ((super_keys+[key]) | map('to_json') | join(".")) + + "]\n" + + yaml2toml(value, super_keys+[key]) + }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} diff --git a/ansible/01_old/roles/cmoa_os_setting/tests/inventory b/ansible/01_old/roles/cmoa_os_setting/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/01_old/roles/cmoa_os_setting/tests/test.yml b/ansible/01_old/roles/cmoa_os_setting/tests/test.yml new file mode 100644 index 0000000..191e731 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - apache diff --git a/ansible/01_old/roles/cmoa_os_setting/vars/main.yml b/ansible/01_old/roles/cmoa_os_setting/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/ansible/01_old/roles/cmoa_os_setting/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for apache diff --git a/ansible/01_old/roles/connect-settings/README.md b/ansible/01_old/roles/connect-settings/README.md new file mode 100644 index 
0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/01_old/roles/connect-settings/defaults/main.yml b/ansible/01_old/roles/connect-settings/defaults/main.yml new file mode 100644 index 0000000..5415520 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/defaults/main.yml @@ -0,0 +1,15 @@ +--- +# defaults file for password + +encrypt: 0 # strings 0 , encrypted 1 +debug_mode: False +sshrootlogin: forced-commands-only +sshmainport: 2222 +iptables_rules: + - { source: "10.10.45.0/24", target: "DROP" } + - { source: "10.10.47.0/24", target: "DROP" } + - { source: "10.10.48.0/24", target: "DROP" } + - { source: "10.10.50.0/24", target: "DROP" } + - { source: "10.10.37.0/24", target: "DROP" } +delete_rule: False +add_rule: True \ No newline at end of file diff --git a/ansible/01_old/roles/connect-settings/files/00_old/gen_password.py b/ansible/01_old/roles/connect-settings/files/00_old/gen_password.py new file mode 100644 index 0000000..b1b4e13 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/00_old/gen_password.py @@ -0,0 +1,44 @@ +#!/usr/bin/python3 + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypt_flag=True if os.sys.argv[1].lower()=='1' else False +except Exception as err: + encrypt_flag=False + +def generate_password(length=8, num_uppercase=1, num_lowercase=1, num_digits=1, num_sp_char=1): + sp_char = '!@#$' + all_chars = string.ascii_letters + string.digits + sp_char + + password = [ + *random.choices(string.ascii_uppercase, k=num_uppercase), + *random.choices(string.ascii_lowercase, k=num_lowercase), + *random.choices(string.digits, k=num_digits), + *random.choices(sp_char, k=num_sp_char) + ] + + remaining_length = length - (num_uppercase + num_lowercase + num_digits + num_sp_char) + password += random.choices(all_chars, k=remaining_length) + + random.shuffle(password) + return ''.join(password) + +def encrypt(plain_text, key): + manual_iv = 
b'PhilinnovatorDEV' + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + ct_bytes = cipher.encrypt(pad(plain_text.encode(), 16)) + ct = base64.b64encode(ct_bytes).decode('utf-8') + return ct + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +plain_text = generate_password() + +if encrypt_flag: + encrypted_text = encrypt(plain_text, key) + print(encrypted_text) +else: + print(plain_text) diff --git a/ansible/01_old/roles/connect-settings/files/00_old/vault_test.py b/ansible/01_old/roles/connect-settings/files/00_old/vault_test.py new file mode 100644 index 0000000..18f6988 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/00_old/vault_test.py @@ -0,0 +1,11 @@ +import hvac + +str_url = "http://10.10.43.98:31080" +str_token = "hvs.CAESIMV6zCg-GpUP4pQgVA5f1ZXkgyJZrqOC6QDCegrpiAX9Gh4KHGh2cy5ORkpkc2ZyVUxYd09qUVFtQldRNDBjS3I" +client = hvac.Client(url=str_url, token=str_token) + +str_mount_point = 'kv' +str_secret_path = 'host1' +read_secret_result = client.secrets.kv.v1.read_secret(mount_point=str_mount_point, path=str_secret_path) +print(read_secret_result) + diff --git a/ansible/01_old/roles/connect-settings/files/custom_excel b/ansible/01_old/roles/connect-settings/files/custom_excel new file mode 100755 index 0000000..562b89c --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/custom_excel @@ -0,0 +1,108 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import os, sys, time, errno, socket, signal, psutil, random, logging.handlers, subprocess, paramiko, hvac +from xlwt import Workbook, XFStyle, Borders, Font, Pattern +from socket import error as SocketError + +process_time = time.strftime("%Y%m%d_%H%M", time.localtime()) +excel_file_name = '/mnt/e/excel/{}.xls'.format(process_time) + +def process_close(flag=True, result=''): + if flag: + print("[Success]") + else: + print("[Fail]:{}".format(result)) + + sys.exit(0) + +def set_header(sheet, header_list): + # 폰트 설정 + font = Font() + font.bold = True + + # 테두리 설정 + borders = Borders() + borders.left 
= Borders.THIN + borders.right = Borders.THIN + borders.top = Borders.THIN + borders.bottom = Borders.THIN + + # 배경색 설정 + pattern = Pattern() + pattern.pattern = Pattern.SOLID_PATTERN + pattern.pattern_fore_colour = 22 # #E2EFDA는 xlwt에서 인덱스 22에 해당하는 색입니다. + + hdrstyle = XFStyle() + hdrstyle.font = font + hdrstyle.borders = borders + hdrstyle.pattern = pattern + + for idx, header in enumerate(header_list): + sheet.write(0, idx, header, hdrstyle) + sheet.col(idx).width = len(header) * 800 + +def write_data(sheet, data_list): + datestyle = XFStyle() + datestyle.num_format_str = 'YYYY-MM-DD' + + for row_num, data in enumerate(data_list, start=1): + for col_num, cell_data in enumerate(data): + if col_num == 7: + sheet.write(row_num, col_num, cell_data, datestyle) + elif col_num in [1, 4, 5]: + formatted_data = u'{}'.format(cell_data) if cell_data else '' + sheet.write(row_num, col_num, formatted_data) + else: + sheet.write(row_num, col_num, cell_data) + +def excel_write(header_list=[], data_list=[], filename='', sheetTitle=''): + workbook = Workbook(style_compression=2, encoding='utf-8') + sheet = workbook.add_sheet(sheetTitle) + + set_header(sheet, header_list) + write_data(sheet, data_list) + + sheet.panes_frozen = True + sheet.vert_split_pos = 0 + sheet.horz_split_pos = 1 + workbook.save(filename) + +def main(): + header_list=['번호','호스트 유형','호스트명','호스트 IP','포트번호','프로토콜','인증방법','1차 로그인 계정명','1차 로그인 비밀번호','1차 로그인 계정명','2차 로그인 비밀번호','용도','비고'] + data_list=[] + + openfile=open('/tmp/host_list','r') + readfile=openfile.readlines() + openfile.close() + for idx, host_data in enumerate(readfile): + try: + if idx==0: continue + host_num=idx + hosttype=host_data.strip().split(' ')[0] + print(hosttype) + hostname=host_data.strip().split(' ')[1] + host_ips=host_data.strip().split(' ')[2] + port_num=int(host_data.strip().split(' ')[3]) + protocol='SSH' + auth_con='Password' + username=host_data.strip().split(' ')[4] + first_pw=host_data.strip().split(' ')[5] + 
rootuser=host_data.strip().split(' ')[6] + secon_pw=host_data.strip().split(' ')[7] + descript='-' + remarks_='-' + data_list.append([host_num,hosttype,hostname,host_ips,port_num,protocol,auth_con,username,first_pw,rootuser,secon_pw,descript,remarks_,]) + except: + continue + + excel_write(header_list, data_list, excel_file_name, 'TEST') + +DEBUG=False +try: + if os.sys.argv[1]: DEBUG=True +except: + pass +main() +process_close() + diff --git a/ansible/01_old/roles/connect-settings/files/decrypt_password b/ansible/01_old/roles/connect-settings/files/decrypt_password new file mode 100755 index 0000000..5e31c71 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/decrypt_password @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypted_text=os.sys.argv[1] +except: + encrypted_text="q6i1/JxyNe1OUrO0JKu+Z4WQTyQZam2yIJTp43dl1pI=" + +def decrypt(ct, key): + manual_iv = b'PhilinnovatorDEV' + ct_bytes = base64.b64decode(ct) + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + return unpad(cipher.decrypt(ct_bytes), 16).decode('utf-8') + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +print(decrypt(encrypted_text, key)) \ No newline at end of file diff --git a/ansible/01_old/roles/connect-settings/files/gen_password b/ansible/01_old/roles/connect-settings/files/gen_password new file mode 100755 index 0000000..febe48a --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/gen_password @@ -0,0 +1,45 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypt_flag=True if os.sys.argv[1].lower()=='1' else False +except Exception as err: + encrypt_flag=False + +def generate_password(length=12, num_uppercase=3, num_lowercase=4, 
num_digits=3, num_sp_char=2): + sp_char = '!@#$' + all_chars = string.ascii_letters + string.digits + sp_char + + password = [ + *random.choices(string.ascii_uppercase, k=num_uppercase), + *random.choices(string.ascii_lowercase, k=num_lowercase), + *random.choices(string.digits, k=num_digits), + *random.choices(sp_char, k=num_sp_char) + ] + + remaining_length = length - (num_uppercase + num_lowercase + num_digits + num_sp_char) + password += random.choices(all_chars, k=remaining_length) + + random.shuffle(password) + return ''.join(password) + +def encrypt(plain_text, key): + manual_iv = b'PhilinnovatorDEV' + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + ct_bytes = cipher.encrypt(pad(plain_text.encode(), 16)) + ct = base64.b64encode(ct_bytes).decode('utf-8') + return ct + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +plain_text = generate_password() + +if encrypt_flag: + encrypted_text = encrypt(plain_text, key) + print(encrypted_text) +else: + print(plain_text) diff --git a/ansible/01_old/roles/connect-settings/files/vault_get b/ansible/01_old/roles/connect-settings/files/vault_get new file mode 100755 index 0000000..d0fabdb --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/vault_get @@ -0,0 +1,17 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import hvac +import os + +hostname=os.sys.argv[1] + +str_url = "http://10.10.43.240:30803" +client = hvac.Client(url=str_url) +client.auth.approle.login(role_id="e96c5fd8-abde-084a-fde7-7450a9348a70", secret_id="5371706b-414a-11d3-f3fd-6cf98871aad1") + +try: + data = client.secrets.kv.v2.read_secret_version(mount_point='host', path=hostname, raise_on_deleted_version=True)['data']['data'] + print(data) +except Exception as err: + print(err) diff --git a/ansible/01_old/roles/connect-settings/files/vault_put b/ansible/01_old/roles/connect-settings/files/vault_put new file mode 100755 index 0000000..aeae507 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/files/vault_put @@ -0,0 +1,21 @@ 
+#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import hvac +import os + +hostname=os.sys.argv[1] +accountid=os.sys.argv[2] +password=os.sys.argv[3] +adminuser=os.sys.argv[4] +adminpass=os.sys.argv[5] + +str_url = "http://10.10.43.240:30803" +client = hvac.Client(url=str_url) +client.auth.approle.login(role_id="e96c5fd8-abde-084a-fde7-7450a9348a70", secret_id="5371706b-414a-11d3-f3fd-6cf98871aad1") + +client.secrets.kv.v2.create_or_update_secret( + mount_point='host', + path=hostname, + secret=dict(accountid=f'{accountid}',password=f'{password}',adminuser=f'{adminuser}',adminpass=f'{adminpass}') +) diff --git a/ansible/01_old/roles/connect-settings/handlers/main.yml b/ansible/01_old/roles/connect-settings/handlers/main.yml new file mode 100644 index 0000000..b44722c --- /dev/null +++ b/ansible/01_old/roles/connect-settings/handlers/main.yml @@ -0,0 +1,16 @@ +--- +- name: Reload systemd configuration + ansible.builtin.systemd: + daemon_reload: True + +- name: Restart teleport service + ansible.builtin.systemd: + name: teleport + enabled: true + state: restarted + +- name: restart sshd + service: + name: sshd + state: restarted + enabled: true \ No newline at end of file diff --git a/ansible/01_old/roles/connect-settings/meta/main.yml b/ansible/01_old/roles/connect-settings/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/connect-settings/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a 
Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
diff --git a/ansible/01_old/roles/connect-settings/tasks/00_host_setting.yml b/ansible/01_old/roles/connect-settings/tasks/00_host_setting.yml new file mode 100644 index 0000000..58b0215 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/00_host_setting.yml @@ -0,0 +1,142 @@ +- name: "Create dev2 group" + ansible.builtin.group: + name: "dev2" + state: present + when: + - add_rule == True + +- name: Ensure user dev2-iac exists + user: + name: "{{ item }}" + create_home: yes + home: "/home/{{ item }}" + group: dev2 + shell: /bin/bash + with_items: + - dev2-iac + - dev2 + when: + - add_rule == True + +- name: "Ensure .ssh directory exists for dev2-iac" + file: + path: /home/dev2-iac/.ssh + state: directory + owner: dev2-iac + group: dev2 + mode: '0700' + when: + - add_rule == True + +- name: "Add authorized key for dev2-iac" + authorized_key: + user: dev2-iac + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCsiN0I8B3UmB1mVBxVpvrSF5j0vrwUggngVrlplW8iJLllSBwarHzmSpMWv3eQtb9QQ/HKyOsS3j6UkbQK2aJ6jGeK2pQUkbb6KdMc9OrS/ILWysritcBJ3rUuITwOMvekQHtq+yKshap3uw/8ZEiM1Xn0MxVGhpAZsWbotf9n6ntmsMDXkRSQnYU5T2y4hkWlYImPkIasmbDFVkxi0Wz7I7pUX4hG3l6NJegXWO6n4OcpXxm26oZUtmpqrNRipUIUglM5xp4+YlQhu3FIa/aEZ+fuE9xnSZ8gCYnmPKwJ7AKKkEUruSTA3vhBnlh5rFYgSg5NkVte2RjdPg1SYZCTUXVwE9bbIzeGiXJ9vSe1/bhacpLeLgg48H6SSVInoCmen6W4Oo4/QlekXMBCuxfRwH2pO2K84gEKAAD0hUHBEf0Eh4rIi3K2oUdDCnMv5CD3lqiBn49hFB+bBdk+kxFNNx9iSDciFc91lIjz2IW8FO//+iLO7DEBZMrz/B8bJQ0=" + when: + - add_rule == True + +- name: "Add authorized key for dev2" + authorized_key: + user: dev2 + key: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDUAppqxDLltrMsYMwIxGi0FA5STA/R+H6oy7myfiJP2Lt4woCogMi3ELVKEhFkeJx4i8y9G80lynEYCHRH1kAQ/7YaJEVFrPXTvBw+OVxYdVS/gLl0rL89ky+n0dv6A9mancrvUOMacI5aN7/W+EhoLohRjRbWlsPGNnvAmO0AZnt595aMUjFkdhusGyBVunDUFSitj9TFkjxDhr6cx8Bi0FLpvdsoAvfqiw/MVKW2pMgj56AT5UCT0wvtSHSNY/C731jP/RKrxP0fnVhIkVys/XmLV/6SVEqL1XwqMTvRfi5+Q8cPsXrnPuUFHiNN4e/MGJkYi0lg7XbX8jDXv3ybdxZ7lGiUDebxjTKBCCghFae3eAwpJADEDfrzb8DHJZFwJVVdKGXvStTWTibcs14ilRPcB4SWIBx/cFCzwOBK/iw8CfEfsbVe6WQbDc4T4LrgL8cUzHPOO8CQcC4DV/O3BuoqQExu6xTmU8rhLT9kgatIdX0K5jgGbuqz7c2lelU=" + when: + - add_rule == True + +- name: "sudoers_users file" + file: + path: /etc/sudoers.d/sudoers_users + state: touch + when: + - add_rule == True + +- name: "Allow user to sudo" + lineinfile: + path: /etc/sudoers.d/sudoers_users + line: "{{ item }} ALL=(ALL) NOPASSWD:ALL" + state: present + with_items: + - dev2-iac + - dev2 + when: + - add_rule == True + +# - name: Check if rule exists +# command: iptables -D INPUT 7 +# loop: "{{ range(0, 9) }}" +# ignore_errors: yes +# when: +# - delete_rule == True + +# - name: Check if rule exists +# command: iptables -C INPUT -s {{ item.source }} -j {{ item.target }} +# register: rule_check +# ignore_errors: yes +# changed_when: false +# with_items: "{{ iptables_rules }}" +# when: +# - add_rule == True + +# - name: Add rule if it doesn't exist +# command: iptables -A INPUT -s {{ item.item.source }} -j {{ item.item.target }} +# with_items: "{{ rule_check.results }}" +# when: +# - item.rc == 1 +# - add_rule == True + +- name: "selinux permissive" + command: "setenforce 0" + ignore_errors: yes + when: + - ansible_facts.os_family == "RedHat" + +- name: "firewalld stop" + systemd: + name: firewalld + state: stopped + enabled: false + ignore_errors: yes + when: + - ansible_facts.os_family == "RedHat" + +- name: Configure ssh root login to {{sshrootlogin}} + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '^(#)?PermitRootLogin.*' + line: 'PermitRootLogin {{sshrootlogin}}' + insertbefore: 
'^Match.*' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: Remove existing Port lines + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^Port' + state: absent + +- name: SSH Listen on Main Port + lineinfile: + dest: /etc/ssh/sshd_config + insertbefore: '^#*AddressFamily' + line: 'Port {{sshmainport}}' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: "Create sshd_config.d directory" + ansible.builtin.file: + path: "/etc/ssh/sshd_config.d/" + state: directory + recurse: yes + owner: root + group: root + +- name: "Setting sshd allow users" + template: + src: allow_users.j2 + dest: "/etc/ssh/sshd_config.d/allow_users.conf" + notify: restart sshd diff --git a/ansible/01_old/roles/connect-settings/tasks/01_get_password.yml b/ansible/01_old/roles/connect-settings/tasks/01_get_password.yml new file mode 100644 index 0000000..c848fda --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/01_get_password.yml @@ -0,0 +1,36 @@ +--- +- name: get password + command: "{{ role_path }}/files/gen_password {{ encrypt }}" + register: user_password + delegate_to: 127.0.0.1 + when: manual_password is not defined + +- name: get admin password + command: "{{ role_path }}/files/gen_password {{ encrypt }}" + register: admin_password + delegate_to: 127.0.0.1 + when: manual_password is not defined + +- name: set fact user password + block: + - set_fact: + user_password: "{{ user_password.stdout }}" + rescue: + - set_fact: + user_password: "{{ manual_password }}" + always: + - debug: + msg: "{{ username }} : {{ user_password }}" + when: debug_mode == True + +- name: set fact admin password + block: + - set_fact: + admin_password: "{{ admin_password.stdout }}" + rescue: + - set_fact: + admin_password: "{{ manual_password }}" + always: + - debug: + msg: "{{ adminuser }} : {{ admin_password }}" + when: debug_mode == True \ No newline at end of file diff --git 
a/ansible/01_old/roles/connect-settings/tasks/02_change_password.yml b/ansible/01_old/roles/connect-settings/tasks/02_change_password.yml new file mode 100644 index 0000000..64deba0 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/02_change_password.yml @@ -0,0 +1,21 @@ +--- +- include_tasks: 99_decrypt_password.yml + when: + - encrypt == 1 + - manual_password is not defined + +- name: user password change + user: + name: "{{ item }}" + password: "{{ user_password | password_hash('sha512') }}" + state: present + with_items: + - "{{ username }}" + +- name: admin password change + user: + name: "{{ item }}" + password: "{{ admin_password | password_hash('sha512') }}" + state: present + with_items: + - "{{ adminuser }}" \ No newline at end of file diff --git a/ansible/01_old/roles/connect-settings/tasks/03_vault.yml b/ansible/01_old/roles/connect-settings/tasks/03_vault.yml new file mode 100644 index 0000000..1f3aa95 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/03_vault.yml @@ -0,0 +1,21 @@ +--- +- name: Check if ansible_port is defined + set_fact: + ansible_port: "{{ ansible_port | default(22) }}" + +- debug: + msg: "{{ ansible_distribution }} {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ ansible_port }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + when: debug_mode == True + +- name: put vault + command: "{{ role_path }}/files/vault_put {{ ansible_default_ipv4.address }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + delegate_to: 127.0.0.1 + +- name: get vault + command: "{{ role_path }}/files/vault_get {{ ansible_default_ipv4.address }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + register: get_vault + delegate_to: 127.0.0.1 + +- debug: + msg: "{{get_vault.stdout_lines}}" + when: debug_mode == True diff --git a/ansible/01_old/roles/connect-settings/tasks/04_excel_export.yml 
b/ansible/01_old/roles/connect-settings/tasks/04_excel_export.yml new file mode 100644 index 0000000..cf70b57 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/04_excel_export.yml @@ -0,0 +1,19 @@ +--- +- name: Redirect output to local file + delegate_to: localhost + copy: + content: "[{{ ansible_date_time.date }} {{ ansible_date_time.hour }}:{{ ansible_date_time.minute }}:{{ ansible_date_time.second }}]" + dest: "/tmp/host_list" + mode: '0666' + backup: yes + +- name: Append output to local file + delegate_to: localhost + lineinfile: + path: "/tmp/host_list" + line: "{{ ansible_distribution }} {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ sshmainport }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + create: yes + +- name: excel export + command: "{{ role_path }}/files/custom_excel" + delegate_to: 127.0.0.1 diff --git a/ansible/01_old/roles/connect-settings/tasks/99_decrypt_password.yml b/ansible/01_old/roles/connect-settings/tasks/99_decrypt_password.yml new file mode 100644 index 0000000..164cecc --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/99_decrypt_password.yml @@ -0,0 +1,27 @@ +--- +- name: user_password decrypt + command: "{{ role_path }}/files/decrypt_password {{ user_password }}" + register: user_password + delegate_to: 127.0.0.1 + +- name: admin_password decrypt + command: "{{ role_path }}/files/decrypt_password {{ admin_password }}" + register: admin_password + delegate_to: 127.0.0.1 + when: + - encrypt == 1 + - manual_password is not defined + +- name: admin_password re fact + set_fact: + admin_password: "{{ admin_password.stdout }}" + when: + - encrypt == 1 + - manual_password is not defined + +- name: user_password re fact + set_fact: + user_password: "{{ user_password.stdout }}" + when: + - encrypt == 1 + - manual_password is not defined diff --git a/ansible/01_old/roles/connect-settings/tasks/main.yml b/ansible/01_old/roles/connect-settings/tasks/main.yml new file mode 
100644 index 0000000..82dd567 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- include: 00_host_setting.yml + tags: host + +- include: 01_get_password.yml + tags: password + +- include: 02_change_password.yml + tags: change + +- include: 03_vault.yml + tags: vault + +- include: 04_excel_export.yml + tags: excel \ No newline at end of file diff --git a/ansible/01_old/roles/connect-settings/templates/allow_users.j2 b/ansible/01_old/roles/connect-settings/templates/allow_users.j2 new file mode 100755 index 0000000..67c88da --- /dev/null +++ b/ansible/01_old/roles/connect-settings/templates/allow_users.j2 @@ -0,0 +1,22 @@ +AllowUsers dev2-iac@10.10.43.* +AllowUsers *@10.20.142.* +{% if ansible_distribution == "Ubuntu" %} +AllowUsers ubuntu@10.10.43.* +{% endif %} +{% if ansible_distribution == "CentOS" %} +AllowUsers centos@10.10.43.* +{% endif %} +{% if ansible_distribution == "RedHat" %} +AllowUsers redhat@10.10.43.* +{% endif %} + +{% if admin_users is defined %} +{% for user in admin_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} +{% if allow_users is defined %} +{% for user in allow_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible/01_old/roles/connect-settings/tests/inventory b/ansible/01_old/roles/connect-settings/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/01_old/roles/connect-settings/tests/test.yml b/ansible/01_old/roles/connect-settings/tests/test.yml new file mode 100644 index 0000000..c604954 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - password diff --git a/ansible/01_old/roles/connect-settings/vars/main.yml b/ansible/01_old/roles/connect-settings/vars/main.yml 
new file mode 100644 index 0000000..1392b01 --- /dev/null +++ b/ansible/01_old/roles/connect-settings/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for password diff --git a/ansible/01_old/roles/datadog.datadog/.circleci/config.yml b/ansible/01_old/roles/datadog.datadog/.circleci/config.yml new file mode 100644 index 0000000..1c3739b --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/.circleci/config.yml @@ -0,0 +1,299 @@ +--- +version: 2.1 + +commands: + dry_run: + parameters: + version: + type: string + python: + type: string + steps: + - run: ansible-playbook -i ./ci_test/inventory/ci.ini "./ci_test/install_agent_<>.yaml" -e 'ansible_python_interpreter=/usr/bin/<>' --check + - run: ansible-playbook -i ./ci_test/inventory/ci.ini "./ci_test/downgrade_to_5.yaml" -e 'ansible_python_interpreter=/usr/bin/<>' --check + + install_agent_5: + parameters: + python: + type: string + steps: + - run: ansible-playbook -i ./ci_test/inventory/ci.ini "./ci_test/install_agent_5.yaml" -e 'ansible_python_interpreter=/usr/bin/<>' + - run: dd-agent info || true + - run: ps aux | grep -v grep | grep datadog-agent + + install_agent: + parameters: + version: + type: string + python: + type: string + jinja2_native: + type: string + default: "false" + inventory: + type: string + default: "ci.ini" + steps: + - run: ANSIBLE_JINJA2_NATIVE="<>" ansible-playbook -i ./ci_test/inventory/<> "./ci_test/install_agent_<>.yaml" -e 'ansible_python_interpreter=/usr/bin/<>' + - run: datadog-agent version + + test_install_no_manage_config: + parameters: + version: + type: string + python: + type: string + steps: + - run: ansible-playbook -i ./ci_test/inventory/ci.ini "./ci_test/install_agent_<>.yaml" -e '{"ansible_python_interpreter":"/usr/bin/<>","datadog_manage_config":false}' + - run: bash -c '[ -f /etc/datadog-agent/datadog.yaml.example ] || [ -f /etc/dd-agent/datadog.conf.example ]' + - run: bash -c '[ ! -f /etc/datadog-agent/datadog.yaml ] && [ ! 
-f /etc/datadog-agent/system-probe.yaml ] && [ ! -f /etc/datadog-agent/security-agent.yaml ] && [ ! -f /etc/dd-agent/datadog.conf ]' + + downgrade_agent_5_32: + parameters: + python: + type: string + steps: + - run: ansible-playbook -i ./ci_test/inventory/ci.ini "./ci_test/downgrade_to_5.yaml" -e 'ansible_python_interpreter=/usr/bin/<>' + - run: dd-agent info || true + - run: ps aux | grep -v grep | grep datadog-agent + + test_agent_install_downgrade: + parameters: + version: + type: string + python: + type: string + steps: + - checkout + - dry_run: + version: "<>" + python: "<>" + - install_agent_5: + python: "<>" + - when: + condition: + not: + equal: [<>, "5"] + steps: + - install_agent: + version: "<>" + python: "<>" + - downgrade_agent_5_32: + python: "<>" + + test_agent_install: + parameters: + version: + type: string + python: + type: string + jinja2_native: + type: string + default: "false" + steps: + - checkout + - install_agent: + version: "<>" + python: "<>" + jinja2_native: "<>" + + test_agent_install_macos: + parameters: + version: + type: string + python: + type: string + jinja2_native: + type: string + default: "false" + steps: + - checkout + - install_agent: + version: "<>" + python: "<>" + jinja2_native: "<>" + inventory: "ci_macos.ini" + +jobs: + ansible_lint: + docker: + - image: datadog/docker-library:ansible_debian_2_10 + steps: + - checkout + # TODO: upgrade ansible-lint to latest (will require updating the image for this task) + - run: pip install ansible-lint==5.4.0 + - run: ansible-lint -v . 
+ + test_install_downgrade: + parameters: + ansible_version: + type: string + agent_version: + type: string + os: + type: string + python: + type: string + docker: + - image: datadog/docker-library:ansible_<>_<> + steps: + - checkout + - test_agent_install_downgrade: + version: "<>" + python: "<>" + + test_install: + parameters: + ansible_version: + type: string + agent_version: + type: string + os: + type: string + python: + type: string + jinja2_native: + type: string + default: "false" + docker: + - image: datadog/docker-library:ansible_<>_<> + steps: + - checkout + - test_agent_install: + version: "<>" + python: "<>" + jinja2_native: "<>" + + test_install_no_manage_config: + parameters: + ansible_version: + type: string + agent_version: + type: string + os: + type: string + python: + type: string + docker: + - image: datadog/docker-library:ansible_<>_<> + steps: + - checkout + - test_install_no_manage_config: + version: "<>" + python: "<>" + + test_install_macos: + parameters: + ansible_version: + type: string + agent_version: + type: string + python: + type: string + jinja2_native: + type: string + default: "false" + macos: + xcode: 13.3.0 + steps: + - checkout + - run: + name: Install Python3 + command: brew install python3 + - run: + name: Install Ansible + command: pip3 install ansible~=<> + - test_agent_install_macos: + version: "<>" + python: "<>" + jinja2_native: "<>" + +workflows: + version: 2 + test_datadog_role: + # Note: Ansible 5.* requires Python >= 3.8, which for now we only have on Debian. 
+ # Whenever newer major versions of RHEL and SUSE get released, they should have + # a new enough Python, so we will test on them as well + jobs: + - ansible_lint + - test_install_downgrade: + matrix: + parameters: + ansible_version: ["2_6", "2_7", "2_8"] + agent_version: ["5", "6", "7"] + os: ["debian"] + python: ["python2", "python3"] + + # Newer debian images only have Pythpn 3 installed + - test_install_downgrade: + matrix: + parameters: + ansible_version: ["2_9", "2_10", "3_4", "4_10", "5_3"] + agent_version: ["5", "6", "7"] + os: ["debian"] + python: ["python3"] + + # Newer debian images only have Pythpn 3 installed + - test_install_no_manage_config: + matrix: + parameters: + ansible_version: ["2_10", "3_4", "4_10"] + agent_version: ["5", "7"] + os: ["debian"] + python: ["python3"] + + # centos = CentOS 7. CentOS <= 7 + Python3 is not supported, + # as the yum module is Python2-only. + - test_install_downgrade: + matrix: + parameters: + ansible_version: ["2_6", "2_7", "2_8", "2_9", "2_10", "3_4", "4_10"] + agent_version: ["5", "6", "7"] + os: ["centos"] + python: ["python2"] + + # We want to check that the dnf path works with CentOS 8 + # Newer CentOS images only have Pythpn 3 installed + - test_install: + matrix: + parameters: + ansible_version: ["2_8", "2_9", "2_10", "3_4", "4_10"] + agent_version: ["6", "7"] + jinja2_native: ["true", "false"] + os: ["rocky8"] + python: ["python3"] + + # Newer suse images only have Python 3 installed + - test_install: + matrix: + parameters: + ansible_version: ["2_8", "2_9", "2_10", "3_4", "4_10"] + agent_version: ["6", "7"] + os: ["suse"] + python: ["python3"] + + # Amazon Linux 2 has yum only by default => we only use Python 3 with Ansible >= 4.10 + # because of the respawn_module functionality added to the yum module in + # https://github.com/ansible/ansible/commit/4c5ce5a1a9e79a845aff4978cfeb72a0d4ecf7d6 + - test_install: + matrix: + parameters: + ansible_version: ["2_8", "2_9", "2_10", "3_4", "4_10"] + agent_version: 
["6", "7"] + os: ["amazonlinux2"] + python: ["python2"] + + - test_install: + matrix: + parameters: + ansible_version: ["4_10"] + agent_version: ["6", "7"] + os: ["amazonlinux2"] + python: ["python3"] + + - test_install_macos: + matrix: + parameters: + ansible_version: ["2.8", "2.9", "2.10", "3.4", "4.10"] + agent_version: ["6_macos", "7_macos"] + python: ["python3"] diff --git a/ansible/01_old/roles/datadog.datadog/.github/CODEOWNERS b/ansible/01_old/roles/datadog.datadog/.github/CODEOWNERS new file mode 100644 index 0000000..338fd9f --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/.github/CODEOWNERS @@ -0,0 +1,4 @@ +* @DataDog/agent-platform + +# Docs +*README.md @DataDog/agent-platform @DataDog/documentation diff --git a/ansible/01_old/roles/datadog.datadog/.gitignore b/ansible/01_old/roles/datadog.datadog/.gitignore new file mode 100644 index 0000000..aca3bea --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/.gitignore @@ -0,0 +1,14 @@ +.idea/ +*.retry +.venv + +# Vagrant +.vagrant + +# pre and post tasks folders (user defined) +pre_tasks/ +post_tasks/ + +# OSX github datastore +**/.DS_Store + diff --git a/ansible/01_old/roles/datadog.datadog/CHANGELOG.md b/ansible/01_old/roles/datadog.datadog/CHANGELOG.md new file mode 100644 index 0000000..1cec6ba --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/CHANGELOG.md @@ -0,0 +1,474 @@ +CHANGELOG +========= + +# 4.19.0 / 2023-05-10 + +* [IMPROVEMENT] Ensure user selected for macOS systemwide installation actually exists. See [#479]. +* [BUGFIX] Refresh Datadog repository cache on Red Hat family systems to ensure DNF properly imports repodata signing keys to its cache. See [#478]. + +# 4.18.0 / 2023-01-12 + +* [DEPRECATION] Remove the old RPM GPG key 4172A230 from hosts that still trust it. This also removes the configuration variables `datadog_yum_gpgkey`, `datadog_zypper_gpgkey` and `datadog_zypper_gpgkey_sha256sum`. See [#466]. 
+ +# 4.17.0 / 2023-01-04 + +* [FEATURE] Add support for Universal Service Monitoring sysprobe configuration. See [#458]. Thanks [@marcus-crane]. +* [IMPROVEMENT] Lock Agent version using `includepkgs` in repofiles on Red Hat compatible platforms. See [#443]. Thanks [@sspans-sbp]. +* [IMPROVEMENT] Prettify and fix yaml indentations. See [#448]. Thanks [@denzhel]. +* [IMPROVEMENT] Add the possibility to prevent the zypper repository installation. See [#452]. Thanks [@jb-mayer]. +* [IMPROVEMENT] Use `ansible_managed` instead of custom hardcoded message in managed files. See [#454]. Thanks [@jlosito]. +* [BUGFIX] Fix version comparison tasks when using ansible-core RC version. See [#446]. +* [BUGFIX] Fix running role multiple times in a row on SUSE compatible platforms. See [#453]. +* [DOCS] Add troubleshooting instructions about `service_facts` breaking Ubuntu 20.04. See [#449]. +* [DOCS] Clarify `datadog_config` behavior. See [#451]. Thanks [@hestonhoffman]. + +# 4.16.0 / 2022-07-11 +* [FEATURE] Add macOS support. See [#437]. Thanks [@lc-applause]. +* [BUGFIX] Remove temporary directory after APT key import. See [#442]. Thanks [@wisnij]. +* [BUGFIX] Prevent security-agent startup if it's not configured. See [#438]. + +# 4.15.0 / 2022-04-20 + +* [IMPROVEMENT] Switch Agent start mode to delayed on Windows. See [#422]. +* [BUGFIX] Fix installation of a newer pinned version by DNF. See [#429]. + +# 4.14.0 / 2022-02-08 + +* [FEATURE] Add tasks for creating custom Python checks. See [#408]. Thanks [@snorlaX-sleeps]. +* [FEATURE] Support Rocky Linux and AlmaLinux. See [#418]. +* [BUGFIX] Fix provisioning on Python 3 / Amazon Linux 2. See [#412]. Thanks [@moleskin-smile]. +* [BUGFIX] Prevent dependency on `ansible.windows` with non-Windows nodes. See [#416]. +* [BUGFIX] Don't display content of `DDAGENTUSER_PASSWORD` for Windows nodes. See [#415]. +* [BUGFIX] Additional fixes for `jinja2_native = True` setting. See [#414]. 
+ +# 4.13.0 / 2022-01-21 + +* [FEATURE] Add datadog_manage_config to disable changing the Agent config files. See [#410]. +* [BUGFIX] Fix error: dict object has no attribute 'system'. See [#409]. Thanks [@stegar123]. + +# 4.12.0 / 2021-11-03 + +* [FEATURE] Add Cloud Workload Security Agent configuration. See [#375]. Thanks [@alsmola]. +* [IMPROVEMENT] Avoid usage of `ansible_lsb` to not depend on `lsb-release` package on Debian. See [#377]. +* [IMPROVEMENT] Check that `datadog_checks` is a mapping to avoid misconfiguration. See [#384]. Thanks [@soar]. +* [IMPROVEMENT] Enable turning off the Agent 6.14 fix for Windows. See [#399]. +* [DOCS] Mention limitations in enabling NPM on Windows. See [#396]. +* [BUGFIX] Fix execution with `jinja2_native = True`. See [#383]. Thanks [@soar]. + +# 4.11.0 / 2021-07-05 + +* [IMPROVEMENT] Install datadog-signing-keys package on Debian/Ubuntu. See [#372]. +* [IMPROVEMENT] Skip install on Linux systems when pinned version is already installed. See [#371]. +* [IMPROVEMENT] Update 'http' URLs to 'https' wherever possible. See [#369].Thanks [@rossigee]. +* [BUGFIX] Detect existing version in check mode on Windows. See [#364]. Thanks [@camjay]. + +# 4.10.0 / 2021-05-25 + +* [IMPROVEMENT] Make Windows package download behavior in check mode consistent with Linux. See [#359]. Thanks [@camjay]. +* [BUGFIX] Remove `indentfirst` in system-probe.yaml.j2, making the role compatible with Jinja2 >= 3. See [#361]. Thanks [@tasktop-teho]. +* [BUGFIX] Ensure gnupg is installed on Debian/Ubuntu. See [#358]. + +# 4.9.0 / 2021-05-06 + +* [IMPROVEMENT] Improvements for APT keys management. See [#351]. + * By default, get keys from keys.datadoghq.com, not the Ubuntu keyserver. + * Always add the `DATADOG_APT_KEY_CURRENT.public` key (contains key used to sign current repodata). + * Add `signed-by` option to all sources list lines. + * On Debian >= 9 and Ubuntu >= 16, only add keys to `/usr/share/keyrings/datadog-archive-keyring.gpg`. 
+ * On older systems, also add the same keyring to `/etc/apt/trusted.gpg.d`. +* [BUGFIX] Don't set `repo_gpgcheck=1` by default on RHEL/CentOS 8.1 and on custom repos. See [#352]. +* [BUGFIX] Change RPM key URLs to non-SNI versions to ensure the role continues to work with Python <= 2.7.9. See [#353]. +* [DOCS] Add a note about installing marketplace integrations. See [#354]. + +# 4.8.2 / 2021-04-21 + +* [BUGFIX] Another fix for agent not restarting after a configuration change on Windows. See [#349]. + +# 4.8.1 / 2021-04-19 + +* [BUGFIX] Fix Agent not restarting after a configuration change on Windows. See [#347]. + +# 4.8.0 / 2021-04-13 + +* [FEATURE] Add NPM support for Windows. See [#335]. +* [IMPROVEMENT] Split Windows handler into its own file, so we don't include anything from ansible.windows on non-Windows; add a note about the dependency on `ansible.windows`. See [#337]. +* [IMPROVEMENT] Turn on `repo_gpgcheck` on RPM repositories by default. See [#341]. +* [IMPROVEMENT] Align Windows agent to Linux so that service is disabled when `datadog_enabled` is `false`. See [#338]. Thanks [@erikhjensen]. +* [BUGFIX] Fix system-probe enablement conditions. See [#336]. +* [CHORE] Fix issues found by linter (fix file permissions, add `role_name` and `namespace` to `galaxy_info`, remove pre/post tasks). See [#340]. + +# 4.7.1 / 2021-03-23 + +* [BUGFIX] Revert addition of NPM support for Windows, which introduced multiple issues. See [#333]. + +# 4.7.0 / 2021-03-23 + +* [FEATURE] Enable configuring `gpgcheck` option on RPM repofiles. See [#324]. +* [FEATURE] Add NPM support for Windows. See [#326]. +* [IMPROVEMENT] Implement usage of multiple GPG keys in repofiles, use keys from keys.datadoghq.com. See [#325]. +* [BUGFIX] Use the `dnf` task instead of `yum` when we detect that a Python 3 interpreter is used on a target host. See [#301]. +* [DOCS] Lint README for Documentation style. See [#327]. + +# 4.6.0 / 2021-01-11 + +* [FEATURE] Allow removing checks. 
See [#151] and [#320]. Thanks [@Jno21]. +* [BUGFIX] Make security-agent also affected by datadog_enabled. See [#318]. +* [BUGFIX] Change configuration perms on Linux. See [#313]. Thanks [@loliee]. +* [CHORE] Do not name the RPM repo file differently depending on the Agent version. See [#311]. +* [CHORE] Replace facts from 'ansible_*' to using 'ansible_facts' dictionary. See [#304]. Thanks to [@samasc30]. + +# 4.5.0 / 2020-11-06 + +* [FEATURE] (Windows) Adds support for non-default installation and configuration directories. See [#295][]. +* [BUGFIX] Fixes handling of nil vs. defined but empty variables. See [#303][]. +* [BUGFIX] (Windows) Fixes incorrect service name when validating services. See [#307][]. +* [FEATURE] Adds support for the latest package signing keys. See [#308][]. +* [FEATURE] Adds support for the Datadog IOT agent. See [#309][]. + +# 4.4.0 / 2020-09-30 + +* [BUGFIX] (Windows) Fix compatibility with Ansible 2.10. See [#289][]. +* [FEATURE] Adds support for 3rd party integrations via the `datadog-agent integration` command. See [#291][]. +* [BUGFIX] Updates apt cache prior to attempting install. See [#297][]. + +# 4.3.0 / 2020-07-07 + +* [FEATURE] Record installation information for telemetry and troubleshooting purposes. See [#281][]. +* [BUGFIX] Fix error when facts value doesn't exist on Redhat OS family of the arm architecture. See [#283][]. Thanks to [@kanga333][]. +* [BUGFIX] (Windows) Fix idempotence when reinstalling same pinned version. See [#269][]. + +# 4.2.1 / 2020-05-04 + +* [BUGFIX] Fix error when checking custom repository file on debian-based systems. See [#275][]. + +# 4.2.0 / 2020-04-08 + +* [FEATURE] Ensure the start mode when starting on Windows. See [#271][]. Thanks to [@DevKyleS][]. + * The Agent service will now always be started on Windows at the end of an Ansible run + if `datadog_enabled` is set to `true`. 
+ Previously, if the Agent was already installed, the start mode of the existing Agent + service was used (which meant a disabled Agent service would remain disabled + after an Ansible run, even with `datadog_enabled: true`). + If you manually disabled the Agent service and want it to remain disabled, + set `datadog_enabled` to `false`. +* [FEATURE] Remove old INI config files from v6/v7 configuration. See [#271][]. Thanks to [@b2jrock][]. +* [FEATURE] Register result when Agent install task is run. See [#268][]. +* [BUGFIX] Update `datadog_additional_groups` task & doc. See [#267][]. +* [BUGFIX] Fix role idempotence on Debian. See [#262][]. Thanks to [@jharley][]. +* [DOCS] README update: system-probe installation steps. See [#257][]. +* [DOCS] README update: minimum Ansible version & various fixes. See [#264][]. +* [DOCS] Documentation (README, CONTRIBUTING) overhaul. See [#270][]. + +# 4.1.1 / 2020-02-10 + +* [BUGFIX] Add skip check on sysprobe set_fact tasks. See [#259][] +* [BUGFIX] Only try to stop sysprobe if it is installed. See [#255][]. Thanks to [@dv9io0o][]. + +# 4.1.0 / 2020-01-20 + +* [FEATURE] Fail with explicit message if OS is not supported by the role. See [#247][] +* [BUGFIX] Ensure that system-probe is stopped if it is disabled or not installed. See [#249][] +* [BUGFIX] Change default datadog_agent group to dd-agent. See [#248][] +* [DOCS] Update instructions to use datadog.datadog as the role name. See [#246][] +* [DOCS] Add development guidelines & small kitchen dev environment. See [#243][] + +# 4.0.1 / 2019-12-23 + +* [BUGFIX] Fix system-probe.yaml.j2 indent filter. See [#240][] +* [BUGFIX] Fix sysprobe service detection for systemd services. See [#242][] +* [OTHER] Improve ansible-galaxy score by following best practices. See [#236][] +* [OTHER] Include names for `include_tasks`. See [#226][]. Thanks to [@the-real-cphillips][]. 
+ +# 4.0.0 / 2019-12-18 + +**This role will install Agent v7 by default.** Datadog Agent v7 runs checks with Python 3, so if you were running any custom checks written in Python, they must be compatible with Python 3. If you were not running any custom checks or if your custom checks are already compatible with Python 3, then it is safe to upgrade to Agent v7. + +* [MAJOR] Agent 7 support. See [#220][]. + * Refer to the [role upgrade section](README.md#role-upgrade-from-v3-to-v4) of the docs for the complete list of changes and instructions to upgrade this role from v3 to v4. +* [FEATURE] Infer major version from `datadog_agent_version`. See [#239][]. +* [FEATURE] Allow pinned version install on multiple platforms at the same time. See [#235][]. + +# 3.4.0 / 2019-12-18 + +* [FEATURE] Reset pinned Windows version. See [#234][]. +* [DOCS] Add README instructions for Windows hosts. See [#233][]. +* [META] Update list of platforms supported by the role. See [#224][]. + +# 3.3.0 / 2019-11-18 + +* [FEATURE] Blacklist installation of 6.14.0 and 6.14.1 on Windows. +* [FEATURE] Run fix + sanity check script before agent install/upgrade on Windows. +* [FEATURE] Adding support for Datadog system-probe (thanks to [@jstoja][]). + +# 3.2.0 / 2019-10-02 + +* [DEPRECATION] Drop support for EOL version of Ansible (2.5) +- [FEATURE] Add the `datadog_integration resource` to easily control installed integrations. + +# 3.1.0 / 2019-08-30 + +- [FEATURE] Trust new RPM key on SUSE. See [#203][]. +- [IMPROVEMENT] Windows: Add the ability to specify the 'ddagentuser' name and password in the configuration. +- [FEATURE] Add 'pre_task' and 'post_task' folder for custom user tasks. + +# 3.0.0 / 2019-05-17 + +- [FEATURE] On Linux: you can now add the Agent's user to additionnal groups. +- [DEPRECATION] Bumping this minimum supported Ansible version from 2.2 to 2.5 (version prior from 2.5 are EOL). 
+- [IMPROVEMENT] Use 'include_tasks' instead of 'include' which bump minimum ansible version to 2.4 (thanks to [@rouge8][]). +- [FIX] Make sure the Live Process agent and APM agent aren't started when datadog_enabled is set to false (thanks to [@pdecat][]). + +# 2.6.0 / 2019-03-05 + +* [FEATURE] Add support for managing Windows hosts. + +# 2.5.0 / 2019-02-12 + +* [IMPROVEMENT] Allow the use of a backup keyserver for apt in case the main one is down. +* [IMPROVEMENT] Fix configuration items order to be the same between playbook runs (thanks to [@jpiron][]). + +# 2.4.0 / 2018-10-25 + +* [FEATURE] Add support for "site" configuration. +* [IMPROVEMENT] Add retry policy when failing to pull GPG key from keyserver.ubuntu.com + +# 2.3.1 / 2018-08-24 + +* [FIX] Disabling repo metadata signature check for SUSE/SLES. + +# 2.3.0 / 2018-07-23 + +* [FEATURE] Add support for SUSE/SLES (thanks to [@enarciso][]). + +# 2.2.0 / 2018-06-06 + +* [DEPRECATION] Drop support for EOL platform +* [IMPROVEMENT] Harmonize tasks names between agent5 and agent6 (thanks [@xp-1000][]). + +# 2.1.0 / 2018-05-14 + +* [FEATURE] Support "--check" Ansible option for dry-run. +* [BUGFIX] Fix downgrade on centos. +* [IMPROVEMENT] Update conf paths to respect agent6 best practice (thanks [@dbr1993][]). +* [IMPROVEMENT] Fix YAML cosmetics: standardize syntax everywhere (thanks [@tomgoren][]). +* [DEPRECATION] Drop support for EOL versions of ansible (<2.2). + +# 2.0.3 / 2018-04-13 + +* [BUGFIX] Removing legacy http apt repos pre-dating usage of HTTPS. See [#116][] + +# 2.0.2 / 2018-03-27 + +* [BUGFIX] Remove empty brackets from datadog.yaml when datadog_config is empty. See [#107][] + +# 2.0.1 / 2018-03-05 + +* [BUGFIX] Remove failing import of expired APT key. See [#105][] + +# 1.6.1 / 2018-03-05 + +* [BUGFIX] Remove failing import of expired APT key. See [#105][] + +# 2.0.0 / 2018-02-27 + +* [RELEASE] Make Agent6 the default version to install. +* [IMPROVEMENT] Defaulting to HTTPS for apt and yum repo. 
+ +# 1.6.0 / 2018-01-19 + +* [IMPROVEMENT] Refresh apt cache every hour. See [#98][] + +# 1.5.0 / 2018-01-05 + +* [FEATURE] Add Agent6 (beta) support on RPM-based distros. See [#90][] (thanks [@brendanlong][]) + +# 1.4.0 / 2017-10-30 + +* [FEATURE] Allow specifying custom repo. See [#80][] +* [FEATURE] Add Agent6 (beta) support on debianoids. See [#81][] +* [BUGFIX] Fix incorrect handler name in process task. See [#68][] (thanks [@jeffwidman][]) +* [SANITY] Improve agent service task name and handler formatting. See [#62][] and [#67][] (thanks [@jeffwidman][]) + +# 1.3.0 / 2017-04-04 + +* [FEATURE] Add support for configuring trace agent. See [#45][] and [#58][] (thanks [@pmbauer][]) +* [FEATURE] Allow pinning the version of the Agent. See [#61][] +* [IMPROVEMENT] Pipe `datadog_checks` through list for python3 support. See [#51][] (thanks [@gtrummell][]) +* [IMPROVEMENT] Use `ansible-lint` on the role and use names on all tasks. See [#50][] (thanks [@eplanet][]) +* [BUGFIX] Fix `ini` format of the `datadog.conf` file. See [#59][] + +# 1.2.0 / 2016-12-13 + +* [FEATURE] Trust new APT and RPM keys. See [#30][] +* [IMPROVEMENT] Change the `state` of `apt-transport-https` from `latest` to `present`. See [#36][] +* [IMPROVEMENT] Convert config file tasks to proper YAML formatting. See [#32][] (thanks [@jeffwidman][]) + +# 1.1.0 / 2016-06-27 + +* [FEATURE] Allow APT repo settings to be user-defined. 
See [#20][] (thanks [@geoffwright][]) + +# 1.0.0 / 2016-06-08 + +Initial release, compatible with Ansible v1 & v2 + + +[#20]: https://github.com/DataDog/ansible-datadog/issues/20 +[#30]: https://github.com/DataDog/ansible-datadog/issues/30 +[#32]: https://github.com/DataDog/ansible-datadog/issues/32 +[#36]: https://github.com/DataDog/ansible-datadog/issues/36 +[#45]: https://github.com/DataDog/ansible-datadog/issues/45 +[#50]: https://github.com/DataDog/ansible-datadog/issues/50 +[#51]: https://github.com/DataDog/ansible-datadog/issues/51 +[#58]: https://github.com/DataDog/ansible-datadog/issues/58 +[#59]: https://github.com/DataDog/ansible-datadog/issues/59 +[#61]: https://github.com/DataDog/ansible-datadog/issues/61 +[#62]: https://github.com/DataDog/ansible-datadog/issues/62 +[#67]: https://github.com/DataDog/ansible-datadog/issues/67 +[#68]: https://github.com/DataDog/ansible-datadog/issues/68 +[#80]: https://github.com/DataDog/ansible-datadog/issues/80 +[#81]: https://github.com/DataDog/ansible-datadog/issues/81 +[#90]: https://github.com/DataDog/ansible-datadog/issues/90 +[#98]: https://github.com/DataDog/ansible-datadog/issues/98 +[#105]: https://github.com/DataDog/ansible-datadog/issues/105 +[#107]: https://github.com/DataDog/ansible-datadog/issues/107 +[#116]: https://github.com/DataDog/ansible-datadog/issues/116 +[#151]: https://github.com/DataDog/ansible-datadog/issues/151 +[#203]: https://github.com/DataDog/ansible-datadog/issues/203 +[#220]: https://github.com/DataDog/ansible-datadog/issues/220 +[#224]: https://github.com/DataDog/ansible-datadog/issues/224 +[#226]: https://github.com/DataDog/ansible-datadog/issues/226 +[#233]: https://github.com/DataDog/ansible-datadog/issues/233 +[#234]: https://github.com/DataDog/ansible-datadog/issues/234 +[#235]: https://github.com/DataDog/ansible-datadog/issues/235 +[#236]: https://github.com/DataDog/ansible-datadog/issues/236 +[#239]: https://github.com/DataDog/ansible-datadog/issues/239 +[#240]: 
https://github.com/DataDog/ansible-datadog/issues/240 +[#242]: https://github.com/DataDog/ansible-datadog/issues/242 +[#243]: https://github.com/DataDog/ansible-datadog/issues/243 +[#246]: https://github.com/DataDog/ansible-datadog/issues/246 +[#247]: https://github.com/DataDog/ansible-datadog/issues/247 +[#248]: https://github.com/DataDog/ansible-datadog/issues/248 +[#249]: https://github.com/DataDog/ansible-datadog/issues/249 +[#255]: https://github.com/DataDog/ansible-datadog/issues/255 +[#257]: https://github.com/DataDog/ansible-datadog/issues/257 +[#259]: https://github.com/DataDog/ansible-datadog/issues/259 +[#262]: https://github.com/DataDog/ansible-datadog/issues/262 +[#264]: https://github.com/DataDog/ansible-datadog/issues/264 +[#267]: https://github.com/DataDog/ansible-datadog/issues/267 +[#268]: https://github.com/DataDog/ansible-datadog/issues/268 +[#269]: https://github.com/DataDog/ansible-datadog/issues/269 +[#270]: https://github.com/DataDog/ansible-datadog/issues/270 +[#271]: https://github.com/DataDog/ansible-datadog/issues/271 +[#275]: https://github.com/DataDog/ansible-datadog/issues/275 +[#281]: https://github.com/DataDog/ansible-datadog/issues/281 +[#283]: https://github.com/DataDog/ansible-datadog/issues/283 +[#289]: https://github.com/DataDog/ansible-datadog/issues/289 +[#291]: https://github.com/DataDog/ansible-datadog/issues/291 +[#295]: https://github.com/DataDog/ansible-datadog/issues/295 +[#297]: https://github.com/DataDog/ansible-datadog/issues/297 +[#301]: https://github.com/DataDog/ansible-datadog/issues/301 +[#303]: https://github.com/DataDog/ansible-datadog/issues/303 +[#304]: https://github.com/DataDog/ansible-datadog/issues/304 +[#307]: https://github.com/DataDog/ansible-datadog/issues/307 +[#308]: https://github.com/DataDog/ansible-datadog/issues/308 +[#309]: https://github.com/DataDog/ansible-datadog/issues/309 +[#311]: https://github.com/DataDog/ansible-datadog/issues/311 +[#313]: 
https://github.com/DataDog/ansible-datadog/issues/313 +[#318]: https://github.com/DataDog/ansible-datadog/issues/318 +[#320]: https://github.com/DataDog/ansible-datadog/issues/320 +[#324]: https://github.com/DataDog/ansible-datadog/issues/324 +[#325]: https://github.com/DataDog/ansible-datadog/issues/325 +[#326]: https://github.com/DataDog/ansible-datadog/issues/326 +[#327]: https://github.com/DataDog/ansible-datadog/issues/327 +[#333]: https://github.com/DataDog/ansible-datadog/issues/333 +[#335]: https://github.com/DataDog/ansible-datadog/issues/335 +[#336]: https://github.com/DataDog/ansible-datadog/issues/336 +[#337]: https://github.com/DataDog/ansible-datadog/issues/337 +[#338]: https://github.com/DataDog/ansible-datadog/issues/338 +[#340]: https://github.com/DataDog/ansible-datadog/issues/340 +[#341]: https://github.com/DataDog/ansible-datadog/issues/341 +[#347]: https://github.com/DataDog/ansible-datadog/issues/347 +[#349]: https://github.com/DataDog/ansible-datadog/issues/349 +[#351]: https://github.com/DataDog/ansible-datadog/issues/351 +[#352]: https://github.com/DataDog/ansible-datadog/issues/352 +[#353]: https://github.com/DataDog/ansible-datadog/issues/353 +[#354]: https://github.com/DataDog/ansible-datadog/issues/354 +[#358]: https://github.com/DataDog/ansible-datadog/issues/358 +[#359]: https://github.com/DataDog/ansible-datadog/issues/359 +[#361]: https://github.com/DataDog/ansible-datadog/issues/361 +[#362]: https://github.com/DataDog/ansible-datadog/issues/362 +[#364]: https://github.com/DataDog/ansible-datadog/issues/364 +[#369]: https://github.com/DataDog/ansible-datadog/issues/369 +[#371]: https://github.com/DataDog/ansible-datadog/issues/371 +[#372]: https://github.com/DataDog/ansible-datadog/issues/372 +[#375]: https://github.com/DataDog/ansible-datadog/issues/375 +[#377]: https://github.com/DataDog/ansible-datadog/issues/377 +[#383]: https://github.com/DataDog/ansible-datadog/issues/383 +[#384]: 
https://github.com/DataDog/ansible-datadog/issues/384 +[#396]: https://github.com/DataDog/ansible-datadog/issues/396 +[#399]: https://github.com/DataDog/ansible-datadog/issues/399 +[#408]: https://github.com/DataDog/ansible-datadog/issues/408 +[#409]: https://github.com/DataDog/ansible-datadog/issues/409 +[#410]: https://github.com/DataDog/ansible-datadog/issues/410 +[#412]: https://github.com/DataDog/ansible-datadog/issues/412 +[#414]: https://github.com/DataDog/ansible-datadog/issues/414 +[#415]: https://github.com/DataDog/ansible-datadog/issues/415 +[#416]: https://github.com/DataDog/ansible-datadog/issues/416 +[#418]: https://github.com/DataDog/ansible-datadog/issues/418 +[#422]: https://github.com/DataDog/ansible-datadog/issues/422 +[#429]: https://github.com/DataDog/ansible-datadog/issues/429 +[#437]: https://github.com/DataDog/ansible-datadog/issues/437 +[#438]: https://github.com/DataDog/ansible-datadog/issues/438 +[#442]: https://github.com/DataDog/ansible-datadog/issues/442 +[#443]: https://github.com/DataDog/ansible-datadog/issues/443 +[#446]: https://github.com/DataDog/ansible-datadog/issues/446 +[#448]: https://github.com/DataDog/ansible-datadog/issues/448 +[#449]: https://github.com/DataDog/ansible-datadog/issues/449 +[#451]: https://github.com/DataDog/ansible-datadog/issues/451 +[#452]: https://github.com/DataDog/ansible-datadog/issues/452 +[#453]: https://github.com/DataDog/ansible-datadog/issues/453 +[#454]: https://github.com/DataDog/ansible-datadog/issues/454 +[#458]: https://github.com/DataDog/ansible-datadog/issues/458 +[#466]: https://github.com/DataDog/ansible-datadog/issues/466 +[#478]: https://github.com/DataDog/ansible-datadog/issues/478 +[#479]: https://github.com/DataDog/ansible-datadog/issues/479 +[@DevKyleS]: https://github.com/DevKyleS +[@Jno21]: https://github.com/Jno21 +[@alsmola]: https://github.com/alsmola +[@b2jrock]: https://github.com/b2jrock +[@brendanlong]: https://github.com/brendanlong +[@camjay]: https://github.com/camjay 
+[@dbr1993]: https://github.com/dbr1993 +[@denzhel]: https://github.com/denzhel +[@dv9io0o]: https://github.com/dv9io0o +[@enarciso]: https://github.com/enarciso +[@eplanet]: https://github.com/eplanet +[@erikhjensen]: https://github.com/erikhjensen +[@geoffwright]: https://github.com/geoffwright +[@gtrummell]: https://github.com/gtrummell +[@hestonhoffman]: https://github.com/hestonhoffman +[@jb-mayer]: https://github.com/jb-mayer +[@jeffwidman]: https://github.com/jeffwidman +[@jharley]: https://github.com/jharley +[@jlosito]: https://github.com/jlosito +[@jpiron]: https://github.com/jpiron +[@jstoja]: https://github.com/jstoja +[@kanga333]: https://github.com/kanga333 +[@lc-applause]: https://github.com/lc-applause +[@loliee]: https://github.com/loliee +[@marcus-crane]: https://github.com/marcus-crane +[@moleskin-smile]: https://github.com/moleskin-smile +[@pdecat]: https://github.com/pdecat +[@pmbauer]: https://github.com/pmbauer +[@rossigee]: https://github.com/rossigee +[@rouge8]: https://github.com/rouge8 +[@samasc30]: https://github.com/samasc30 +[@snorlaX-sleeps]: https://github.com/snorlaX-sleeps +[@soar]: https://github.com/soar +[@sspans-sbp]: https://github.com/sspans-sbp +[@stegar123]: https://github.com/stegar123 +[@tasktop-teho]: https://github.com/tasktop-teho +[@the-real-cphillips]: https://github.com/the-real-cphillips +[@tomgoren]: https://github.com/tomgoren +[@wisnij]: https://github.com/wisnij +[@xp-1000]: https://github.com/xp-1000 \ No newline at end of file diff --git a/ansible/01_old/roles/datadog.datadog/CONTRIBUTING.md b/ansible/01_old/roles/datadog.datadog/CONTRIBUTING.md new file mode 100644 index 0000000..744f876 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/CONTRIBUTING.md @@ -0,0 +1,65 @@ +# Contributing + +The code is licensed under the Apache License 2.0 (see LICENSE for details). 
+ +[![Ansible Galaxy](https://img.shields.io/badge/galaxy-Datadog.datadog-660198.svg)](https://galaxy.ansible.com/Datadog/datadog/) +[![Build Status](https://travis-ci.org/DataDog/ansible-datadog.svg?branch=master)](https://travis-ci.org/DataDog/ansible-datadog) + +First of all, thanks for contributing! + +This document provides some basic guidelines for contributing to this repository. To propose improvements, feel free to submit a PR. + +## Submitting issues + +* If you think you've found an issue, search the issue list to see if there's an existing issue. +* Then, if you find nothing, open a Github issue. + +## Pull Requests + +Have you fixed a bug or written a new feature and want to share it? Many thanks! + +In order to ease/speed up our review, here are some items you can check/improve when submitting your PR: + + * Have a proper commit history (we advise you to rebase if needed). + * Write tests for the code you wrote. + * Preferably, make sure that all unit tests pass locally and some relevant kitchen tests. + * Summarize your PR with an explanatory title and a message describing your changes, cross-referencing any related bugs/PRs. + * Open your PR against the `master` branch. + +Your pull request must pass all CI tests before we merge it. If you see an error and don't think it's your fault, it may not be! [Join us on Slack][slack] or send us an email, and together we'll get it sorted out. + +### Keep it small, focused + +Avoid changing too many things at once. For instance if you're fixing a recipe and at the same time adding some code refactor, it makes reviewing harder and the _time-to-release_ longer. + +### Commit messages + +Please don't be this person: `git commit -m "Fixed stuff"`. Take a moment to write meaningful commit messages. + +The commit message should describe the reason for the change and give extra details that will allow someone later on to understand in 5 seconds the thing you've been working on for a day. 
+ +If your commit is only shipping documentation changes or example files, and is a complete no-op for the test suite, add **[skip ci]** in the commit message body to skip the build and give that slot to someone else who does need it. + +### Squash your commits + +Rebase your changes on `master` and squash your commits whenever possible. This keeps history cleaner and easier to revert things. It also makes developers happier! + +## Development + +To contribute, follow the contributing guidelines above. + +### Manual testing + +To test the roles provided by this project, you can follow the instructions in the manual tests [readme.md][tests]. + +## Author Information + +brian@akins.org + +dustinjamesbrown@gmail.com --Forked from brian@akins.org + +Datadog --Forked from dustinjamesbrown@gmail.com + + +[slack]: https://datadoghq.slack.com +[tests]: https://github.com/DataDog/ansible-datadog/blob/master/manual_tests/readme.md diff --git a/ansible/01_old/roles/datadog.datadog/LICENSE b/ansible/01_old/roles/datadog.datadog/LICENSE new file mode 100644 index 0000000..9df50a3 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/LICENSE @@ -0,0 +1,203 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2014 Brian Akins brian@akins.org + Copyright 2015 Dustin Brown dustinjamesbrown@gmail.com + Copyright 2015 Datadog, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/01_old/roles/datadog.datadog/README.md b/ansible/01_old/roles/datadog.datadog/README.md new file mode 100644 index 0000000..6c22c22 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/README.md @@ -0,0 +1,642 @@ +# Ansible Datadog Role + +The Ansible Datadog role installs and configures the Datadog Agent and integrations. Version `4` of the role installs the Datadog Agent v7 by default. + +## Setup + +### Requirements + +- Requires Ansible v2.6+. +- Supports most Debian and RHEL-based Linux distributions, macOS, and Windows. 
+- When using Ansible 2.10+ on Windows, requires the `ansible.windows` collection to be installed: + + ```shell + ansible-galaxy collection install ansible.windows + ``` + +### Installation + +Install the [Datadog role][1] from Ansible Galaxy on your Ansible server: + +```shell +ansible-galaxy install datadog.datadog +``` + +To deploy the Datadog Agent on hosts, add the Datadog role and your API key to your playbook: + +```text +- hosts: servers + roles: + - { role: datadog.datadog, become: yes } + vars: + datadog_api_key: "" +``` + +#### Role variables + +| Variable | Description | +|--------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `datadog_api_key` | Your Datadog API key. | +| `datadog_site` | The site of the Datadog intake to send Agent data to. Defaults to `datadoghq.com`, set to `datadoghq.eu` to send data to the EU site. This option is only available with Agent version >= 6.6.0. | +| `datadog_agent_flavor` | Override the default Debian / RedHat Package for IOT Installations on RPI. Defaults to "datadog-agent" - use "datadog-iot-agent" for RPI. | +| `datadog_agent_version` | The pinned version of the Agent to install (optional, but recommended), for example: `7.16.0`. Setting `datadog_agent_major_version` is not needed if `datadog_agent_version` is used. **Note**: Downgrades are not supported on Windows platforms. | +| `datadog_agent_major_version` | The major version of the Agent to install. The possible values are 5, 6, or 7 (default). If `datadog_agent_version` is set, it takes precedence otherwise the latest version of the specified major is installed. 
Setting `datadog_agent_major_version` is not needed if `datadog_agent_version` is used. | +| `datadog_checks` | YAML configuration for Agent checks to drop into:
- `/etc/datadog-agent/conf.d/.d/conf.yaml` for Agent v6 and v7,
- `/etc/dd-agent/conf.d` for Agent v5. | +| `datadog_disable_untracked_checks` | Set to `true` to remove all checks not present in `datadog_checks` and `datadog_additional_checks`. | +| `datadog_additional_checks` | List of additional checks that are not removed if `datadog_disable_untracked_checks` is set to `true`. | +| `datadog_disable_default_checks` | Set to `true` to remove all default checks. | +| `datadog_config` | Set configuration for the Datadog Agent. The role writes the config to the [correct location based on the operating system](https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6v7#agent-main-configuration-file). For a full list of config options, see [the `datadog.yaml` template file in the datadog-agent GitHub repository](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml). | +| `datadog_config_ex` | (Optional) Extra INI sections to go in `/etc/dd-agent/datadog.conf` (Agent v5 only). | +| `datadog_apt_repo` | Override the default Datadog `apt` repository. Make sure to use the `signed-by` option if repository metadata is signed using Datadog's signing keys: `deb [signed-by=/usr/share/keyrings/datadog-archive-keyring.gpg] https://yourrepo`. | +| `datadog_apt_cache_valid_time` | Override the default apt cache expiration time (defaults to 1 hour). | +| `datadog_apt_key_url_new` | Override the location from which to obtain Datadog `apt` key (the deprecated `datadog_apt_key_url` variable refers to an expired key that's been removed from the role). The URL is expected to be a GPG keyring containing keys `382E94DE` and `F14F620E`. | +| `datadog_yum_repo` | Override the default Datadog `yum` repository. | +| `datadog_yum_repo_gpgcheck` | Override the default `repo_gpgcheck` value (empty). 
If empty, value is dynamically set to `yes` when custom `datadog_yum_repo` is not used and system is not RHEL/CentOS 8.1 (due to [a bug](https://bugzilla.redhat.com/show_bug.cgi?id=1792506) in dnf), otherwise it's set to `no`. **Note**: repodata signature verification is always turned off for Agent 5. | +| `datadog_yum_gpgcheck` | Override the default `gpgcheck` value (`yes`) - use `no` to turn off package GPG signature verification. | +| `datadog_yum_gpgkey` | **Removed in version 4.18.0** Override the default URL to the Datadog `yum` key used to verify Agent v5 and v6 (up to 6.13) packages (key ID `4172A230`). | +| `datadog_yum_gpgkey_e09422b3` | Override the default URL to the Datadog `yum` key used to verify Agent v6.14+ packages (key ID `E09422B3`). | +| `datadog_yum_gpgkey_e09422b3_sha256sum` | Override the default checksum of the `datadog_yum_gpgkey_e09422b3` key. | +| `datadog_zypper_repo` | Override the default Datadog `zypper` repository. | +| `datadog_zypper_repo_gpgcheck` | Override the default `repo_gpgcheck` value (empty). If empty, value is dynamically set to `yes` when custom `datadog_zypper_repo` is not used, otherwise it's set to `no`. **Note**: repodata signature verification is always turned off for Agent 5. | +| `datadog_zypper_gpgcheck` | Override the default `gpgcheck` value (`yes`) - use `no` to turn off package GPG signature verification. | +| `datadog_zypper_gpgkey` | **Removed in version 4.18.0** Override the default URL to the Datadog `zypper` key used to verify Agent v5 and v6 (up to 6.13) packages (key ID `4172A230`). | +| `datadog_zypper_gpgkey_sha256sum` | **Removed in version 4.18.0** Override the default checksum of the `datadog_zypper_gpgkey` key. | +| `datadog_zypper_gpgkey_e09422b3` | Override the default URL to the Datadog `zypper` key used to verify Agent v6.14+ packages (key ID `E09422B3`). | +| `datadog_zypper_gpgkey_e09422b3_sha256sum` | Override the default checksum of the `datadog_zypper_gpgkey_e09422b3` key. 
| +| `datadog_agent_allow_downgrade` | Set to `yes` to allow Agent downgrades on apt-based platforms (use with caution, see `defaults/main.yml` for details). **Note**: On Centos this only works with Ansible 2.4+. | +| `datadog_enabled` | Set to `false` to prevent `datadog-agent` service from starting (defaults to `true`). | +| `datadog_additional_groups` | Either a list, or a string containing a comma-separated list of additional groups for the `datadog_user` (Linux only). | +| `datadog_windows_ddagentuser_name` | The name of Windows user to create/use, in the format `\` (Windows only). | +| `datadog_windows_ddagentuser_password` | The password used to create the user and/or register the service (Windows only). | +| `datadog_apply_windows_614_fix` | Whether or not to download and apply file referenced by `datadog_windows_614_fix_script_url` (Windows only). See https://dtdg.co/win-614-fix for more details. You can set this to `false` assuming your hosts aren't running Datadog Agent 6.14.\*. | +| `datadog_macos_user` | The name of the user to run Agent under. The user has to exist, it won't be created automatically. Defaults to `ansible_user` (macOS only). | +| `datadog_macos_download_url` | Override the URL to download the DMG installer from (macOS only). | + +### Integrations + +To configure a Datadog integration (check), add an entry to the `datadog_checks` section. The first level key is the name of the check, and the value is the YAML payload to write the configuration file. Examples are provided below. + +#### Process check + +To define two instances for the `process` check use the configuration below. 
This creates the corresponding configuration files: + +* Agent v6 & v7: `/etc/datadog-agent/conf.d/process.d/conf.yaml` +* Agent v5: `/etc/dd-agent/conf.d/process.yaml` + +```yml + datadog_checks: + process: + init_config: + instances: + - name: ssh + search_string: ['ssh', 'sshd'] + - name: syslog + search_string: ['rsyslog'] + cpu_check_interval: 0.2 + exact_match: true + ignore_denied_access: true +``` + +#### Custom check + +To configure a custom check use the configuration below. This creates the corresponding configuration files: + +- Agent v6 & v7: `/etc/datadog-agent/conf.d/my_custom_check.d/conf.yaml` +- Agent v5: `/etc/dd-agent/conf.d/my_custom_check.yaml` + +```yml + datadog_checks: + my_custom_check: + init_config: + instances: + - some_data: true +``` + +##### Custom Python Checks + +To pass a Python check to the playbook, use the configuration below. + +This configuration requires the Datadog [play and role][12] to be a part of the larger playbook where the value passed in is the relative file path to the actual task for [Linux][13] or [Windows][14]. + +This is only available for Agent v6+. + +The key should be the name of the file created in the checks directory `checks.d/{{ item }}.py`: + +```yml + datadog_checks: + my_custom_check: + init_config: + instances: + - some_data: true + datadog_custom_checks: + my_custom_check: '../../../custom_checks/my_custom_check.py' +``` + +#### Autodiscovery + +When using Autodiscovery, there is no pre-processing nor post-processing on the YAML. This means every YAML section is added to the final configuration file, including `autodiscovery identifiers`. + +The example below configures the PostgreSQL check through **Autodiscovery**: + +```yml + datadog_checks: + postgres: + ad_identifiers: + - db-master + - db-slave + init_config: + instances: + - host: %%host%% + port: %%port%% + username: username + password: password +``` + +Learn more about [Autodiscovery][3] in the Datadog documentation. 
+ +### Tracing + +To enable trace collection with Agent v6 or v7 use the following configuration: + +```yaml +datadog_config: + apm_config: + enabled: true +``` + +To enable trace collection with Agent v5 use the following configuration: + +```yaml +datadog_config: + apm_enabled: "true" # has to be a string +``` + +### Live processes + +To enable [live process][6] collection with Agent v6 or v7 use the following configuration: + +```yml +datadog_config: + process_config: + enabled: "true" # type: string +``` + +The possible values for `enabled` are: `"true"`, `"false"` (only container collection), or `"disabled"` (disable live processes entirely). + +#### Variables + +The following variables are available for live processes: + +* `scrub_args`: Enables the scrubbing of sensitive arguments from a process command line (defaults to `true`). +* `custom_sensitive_words`: Expands the default list of sensitive words used by the command line scrubber. + +#### System probe + +The system probe is configured under the `system_probe_config` variable. Any variables nested underneath are written to the `system-probe.yaml`, in the `system_probe_config` section. + +[Network Performance Monitoring][7] (NPM) is configured under the `network_config` variable. Any variables nested underneath are written to the `system-probe.yaml`, in the `network_config` section. + +[Cloud Workload Security][8] is configured under the `runtime_security_config` variable. Any variables nested underneath are written to the `system-probe.yaml` and `security-agent.yaml`, in the `runtime_security_config` section. + +[Universal Service Monitoring][17] (USM) is configured under the `service_monitoring_config` variable. Any variables nested underneath are written to the `system-probe.yaml`, in the `service_monitoring_config` section. + +**Note for Windows users**: NPM is supported on Windows with Agent v6.27+ and v7.27+. 
+ It ships as an optional component that is only installed if `network_config.enabled` is set to true when the Agent is installed or upgraded. Because of this, existing installations might need to do an uninstall and reinstall of the Agent once to install the NPM component, unless the Agent is upgraded at the same time. + +#### Example configuration + +```yml +datadog_config: + process_config: + enabled: "true" # type: string + scrub_args: true + custom_sensitive_words: ['consul_token','dd_api_key'] +system_probe_config: + sysprobe_socket: /opt/datadog-agent/run/sysprobe.sock +network_config: + enabled: true +service_monitoring_config: + enabled: true +runtime_security_config: + enabled: true +``` + +**Note**: This configuration works with Agent 6.24.1+ and 7.24.1+. For older Agent versions, see the [Network Performance Monitoring][9] documentation on how to enable system-probe. + +On Linux, once this modification is complete, follow the steps below if you installed an Agent version older than 6.18.0 or 7.18.0: + +1. Start the system-probe: `sudo service datadog-agent-sysprobe start` **Note**: If the service wrapper is not available on your system, run this command instead: `sudo initctl start datadog-agent-sysprobe`. +2. [Restart the Agent][10]: `sudo service datadog-agent restart`. +3. Enable the system-probe to start on boot: `sudo systemctl enable datadog-agent-sysprobe`. + +For manual setup, see the [NPM][9] documentation. + +#### Agent v5 + +To enable [live process][6] collection with Agent v5, use the following configuration: + +```yml +datadog_config: + process_agent_enabled: true +datadog_config_ex: + process.config: + scrub_args: true + custom_sensitive_words: "," +``` + +## Versions + +By default, the current major version of the Datadog Ansible role installs Agent v7. The variables `datadog_agent_version` and `datadog_agent_major_version` are available to control the Agent version installed.
+ +For v4+ of this role, when `datadog_agent_version` is used to pin a specific Agent version, the role derives per-OS version names to comply with the version naming schemes of the supported operating systems, for example: + +- `1:7.16.0-1` for Debian and SUSE based +- `7.16.0-1` for RedHat-based +- `7.16.0-1` for macOS +- `7.16.0` for Windows. + +This makes it possible to target hosts running different operating systems in the same Ansible run, for example: + +| Provided | Installs | System | +|-------------------------------------|--------------|-----------------------| +| `datadog_agent_version: 7.16.0` | `1:7.16.0-1` | Debian and SUSE-based | +| `datadog_agent_version: 7.16.0` | `7.16.0-1` | RedHat-based | +| `datadog_agent_version: 7.16.0` | `7.16.0-1` | macOS | +| `datadog_agent_version: 7.16.0` | `7.16.0` | Windows | +| `datadog_agent_version: 1:7.16.0-1` | `1:7.16.0-1` | Debian and SUSE-based | +| `datadog_agent_version: 1:7.16.0-1` | `7.16.0-1` | RedHat-based | +| `datadog_agent_version: 1:7.16.0-1` | `7.16.0` | Windows | + +**Note**: If the version is not provided, the role uses `1` as the epoch and `1` as the release number. + +**Agent v5 (older version)**: + +The Datadog Ansible role includes support for Datadog Agent v5 for Linux only. To install Agent v5, use `datadog_agent_major_version: 5` to install the latest version of Agent v5 or set `datadog_agent_version` to a specific version of Agent v5. **Note**: The `datadog_agent5` variable is obsolete and has been removed. 
+ +### Repositories + +#### Linux + +When the variables `datadog_apt_repo`, `datadog_yum_repo`, and `datadog_zypper_repo` are not set, the official Datadog repositories for the major version set in `datadog_agent_major_version` are used: + +| # | Default apt repository | Default yum repository | Default zypper repository | +|---|-------------------------------------------|------------------------------------|-----------------------------------------| +| 5 | deb https://apt.datadoghq.com stable main | https://yum.datadoghq.com/rpm | https://yum.datadoghq.com/suse/rpm | +| 6 | deb https://apt.datadoghq.com stable 6 | https://yum.datadoghq.com/stable/6 | https://yum.datadoghq.com/suse/stable/6 | +| 7 | deb https://apt.datadoghq.com stable 7 | https://yum.datadoghq.com/stable/7 | https://yum.datadoghq.com/suse/stable/7 | + +To override the default behavior, set these variables to something else than an empty string. + +If you previously used the Agent v5 variables, use the **new** variables below with `datadog_agent_major_version` set to `5` or `datadog_agent_version` pinned to a specific Agent v5 version. + +| Old | New | +|------------------------------|-----------------------| +| `datadog_agent5_apt_repo` | `datadog_apt_repo` | +| `datadog_agent5_yum_repo` | `datadog_yum_repo` | +| `datadog_agent5_zypper_repo` | `datadog_zypper_repo` | + +Since version 4.9.0, the `use_apt_backup_keyserver` variable has been removed, as APT keys are obtained from https://keys.datadoghq.com. 
+ +#### Windows + +When the variable `datadog_windows_download_url` is not set, the official Windows MSI package corresponding to the `datadog_agent_major_version` is used: + +| Agent version | Default Windows MSI package URL | +|---------------|----------------------------------------------------------------------------------| +| 6 | https://s3.amazonaws.com/ddagent-windows-stable/datadog-agent-6-latest.amd64.msi | +| 7 | https://s3.amazonaws.com/ddagent-windows-stable/datadog-agent-7-latest.amd64.msi | + +To override the default behavior, set this variable to something other than an empty string. + +#### macOS + +When the variable `datadog_macos_download_url` is not set, the official macOS DMG package corresponding to the `datadog_agent_major_version` is used: + +| Agent version | Default macOS DMG package URL | +|---------------|--------------------------------------------------------------| +| 6 | https://s3.amazonaws.com/dd-agent/datadog-agent-6-latest.dmg | +| 7 | https://s3.amazonaws.com/dd-agent/datadog-agent-7-latest.dmg | + +To override the default behavior, set this variable to something other than an empty string. + +### Upgrade + +To upgrade from Agent v6 to v7, use `datadog_agent_major_version: 7` to install the latest version or set `datadog_agent_version` to a specific version of Agent v7. Use similar logic to upgrade from Agent v5 to v6. + +#### Integrations + +**Available for Agent v6.8+** + +Use the `datadog_integration` resource to install a specific version of a Datadog integration. Keep in mind, the Agent comes with all the integrations already installed. This command is useful for upgrading a specific integration without upgrading the whole Agent. For more details, see [Integration Management][4]. + +Available actions: + +- `install`: Installs a specific version of the integration. +- `remove`: Removes an integration. 
+ +##### Datadog Marketplace + +[Datadog Marketplace][15] integrations can be installed with the `datadog_integration` resource. **Note**: Marketplace integrations are considered to be "third party" and thus have to have `third_party: true` set - see the example below. + +##### Syntax + +```yml + datadog_integration: + : + action: + version: +``` + +To install third party integrations, set `third_party` to true: + +```yml + datadog_integration: + : + action: + version: + third_party: true +``` + +##### Example + +This example installs version `1.11.0` of the ElasticSearch integration and removes the `postgres` integration. + +```yml + datadog_integration: + datadog-elastic: + action: install + version: 1.11.0 + datadog-postgres: + action: remove +``` + +To see the available versions of Datadog integrations, see their `CHANGELOG.md` file in the [integrations-core repository][5]. + +### Downgrade + +To downgrade to a prior version of the Agent: + +1. Set `datadog_agent_version` to a specific version, for example: `5.32.5`. +2. Set `datadog_agent_allow_downgrade` to `yes`. + +**Notes:** + +- Downgrades are not supported for Windows platforms. + +## Playbooks + +Below are some sample playbooks to assist you with using the Datadog Ansible role. + +The following example sends data to Datadog US (default), enables logs, NPM, and configures a few checks. 
+ +```yml +- hosts: servers + roles: + - { role: datadog.datadog, become: yes } + vars: + datadog_api_key: "" + datadog_agent_version: "7.16.0" + datadog_config: + tags: + - ":" + - ":" + log_level: INFO + apm_config: + enabled: true + logs_enabled: true # available with Agent v6 and v7 + datadog_checks: + process: + init_config: + instances: + - name: ssh + search_string: ['ssh', 'sshd' ] + - name: syslog + search_string: ['rsyslog' ] + cpu_check_interval: 0.2 + exact_match: true + ignore_denied_access: true + ssh_check: + init_config: + instances: + - host: localhost + port: 22 + username: root + password: + sftp_check: True + private_key_file: + add_missing_keys: True + nginx: + init_config: + instances: + - nginx_status_url: http://example.com/nginx_status/ + tags: + - "source:nginx" + - "instance:foo" + - nginx_status_url: http://example2.com:1234/nginx_status/ + tags: + - "source:nginx" + - ":" + + #Log collection is available on Agent 6 and 7 + logs: + - type: file + path: /var/log/access.log + service: myapp + source: nginx + sourcecategory: http_web_access + - type: file + path: /var/log/error.log + service: nginx + source: nginx + sourcecategory: http_web_access + # datadog_integration is available on Agent 6.8+ + datadog_integration: + datadog-elastic: + action: install + version: 1.11.0 + datadog-postgres: + action: remove + network_config: + enabled: true +``` + +### Agent v6 + +This example installs the latest Agent v6: + +```yml +- hosts: servers + roles: + - { role: datadog.datadog, become: yes } + vars: + datadog_agent_major_version: 6 + datadog_api_key: "" +``` + +### Configuring the site + +If using a site other than the default `datadoghq.com`, set the `datadog_site` var to the appropriate URL (eg: `datadoghq.eu`, `us3.datadoghq.com`). 
+ +This example sends data to the EU site: + +```yml +- hosts: servers + roles: + - { role: datadog.datadog, become: yes } + vars: + datadog_site: "datadoghq.eu" + datadog_api_key: "" +``` + +### Windows + +On Windows, remove the `become: yes` option so the role does not fail. Below are two methods to make the example playbooks work with Windows hosts: + +#### Inventory file + +Using the inventory file is the recommended approach. Set the `ansible_become` option to `no` in the inventory file for each Windows host: + +```ini +[servers] +linux1 ansible_host=127.0.0.1 +linux2 ansible_host=127.0.0.2 +windows1 ansible_host=127.0.0.3 ansible_become=no +windows2 ansible_host=127.0.0.4 ansible_become=no +``` + +To avoid repeating the same configuration for all Windows hosts, group them and set the variable at the group level: + +```ini +[linux] +linux1 ansible_host=127.0.0.1 +linux2 ansible_host=127.0.0.2 + +[windows] +windows1 ansible_host=127.0.0.3 +windows2 ansible_host=127.0.0.4 + +[windows:vars] +ansible_become=no +``` + +#### Playbook file + +Alternatively, if your playbook **only runs on Windows hosts**, use the following in the playbook file: + +```yml +- hosts: servers + roles: + - { role: datadog.datadog } + vars: + ... +``` + +**Note**: This configuration fails on Linux hosts. Only use it if the playbook is specific to Windows hosts. Otherwise, use the [inventory file method](#inventory-file). 
+ +### Uninstallation + +On Windows it's possible to uninstall the Agent by using the following code in your Ansible role: + +```yml +- name: Check If Datadog Agent is installed + win_shell: | + (get-wmiobject win32_product -Filter "Name LIKE '%datadog%'").IdentifyingNumber + register: agent_installed_result +- name: Set Datadog Agent installed fact + set_fact: + agent_installed: "{{ agent_installed_result.stdout | trim }}" +- name: Uninstall the Datadog Agent + win_package: + product_id: "{{ agent_installed }}" + state: absent + when: agent_installed != "" +``` + +However, for more control over the uninstall parameters, the following code can be used. +In this example, the '/norestart' flag is added and a custom location for the uninstallation logs is specified: + +```yml +- name: Check If Datadog Agent is installed + win_stat: + path: 'c:\Program Files\Datadog\Datadog Agent\bin\agent.exe' + register: stat_file +- name: Uninstall the Datadog Agent + win_shell: start-process msiexec -Wait -ArgumentList ('/log', 'C:\\uninst.log', '/norestart', '/q', '/x', (Get-WmiObject -Class Win32_Product -Filter "Name='Datadog Agent'" -ComputerName .).IdentifyingNumber) + when: stat_file.stat.exists == True +``` + +## Troubleshooting + +### Debian stretch + +**Note:** this information applies to versions of the role prior to 4.9.0. Since 4.9.0, the `apt_key` module is no longer used by the role. + +On Debian Stretch, the `apt_key` module used by the role requires an additional system dependency to work correctly. The dependency (`dirmngr`) is not provided by the module. 
Add the following configuration to your playbooks to make use of the present role: + +```yml +--- +- hosts: all + pre_tasks: + - name: Debian Stretch requires the dirmngr package to use apt_key + become: yes + apt: + name: dirmngr + state: present + roles: + - { role: datadog.datadog, become: yes } + vars: + datadog_api_key: "" +``` + +### CentOS 6/7 with Python 3 interpreter and Ansible 2.10.x or below + +The `yum` Python module, which is used in this role to install the Agent on CentOS-based hosts, is only available on Python 2 if Ansible 2.10.x or below is used. In such cases, the `dnf` package manager would have to be used instead. + +However, `dnf` and the `dnf` Python module are not installed by default on CentOS-based hosts before CentOS 8. In this case, it is not possible to install the Agent when a Python 3 interpreter is used. + +This role fails early when this situation is detected to indicate that Ansible 2.11+ or a Python 2 interpreter is needed when installing the Agent on CentOS / RHEL < 8. + +To bypass this early failure detection (for instance, if `dnf` and the `python3-dnf` package are available on your host), set the `datadog_ignore_old_centos_python3_error` variable to `true`. + +### Windows + +Due to a critical bug in Agent versions `6.14.0` and `6.14.1` on Windows, installation of these versions is blocked (starting with version `3.3.0` of this role). + +**NOTE:** Ansible fails on Windows if `datadog_agent_version` is set to `6.14.0` or `6.14.1`. Use `6.14.2` or above. + +If you are updating from **6.14.0 or 6.14.1 on Windows**, use the following steps: + +1. Upgrade the present `datadog.datadog` Ansible role to the latest version (`>=3.3.0`). +2. Set the `datadog_agent_version` to `6.14.2` or above (defaults to latest). + +For more details, see [Critical Bug in Uninstaller for Datadog Agent 6.14.0 and 6.14.1 on Windows][11]. 
+ +### Ubuntu 20.04 broken by service_facts + +Running the `service_facts` module on Ubuntu 20.04 causes the following error: + +``` +localhost | FAILED! => { + "changed": false, + "msg": "Malformed output discovered from systemd list-unit-files: accounts-daemon.service enabled enabled " +} +``` + +To fix this, [update Ansible to `v2.9.8` or above][16]. + +[1]: https://galaxy.ansible.com/Datadog/datadog +[2]: https://github.com/DataDog/ansible-datadog +[3]: https://docs.datadoghq.com/agent/autodiscovery +[4]: https://docs.datadoghq.com/agent/guide/integration-management/ +[5]: https://github.com/DataDog/integrations-core +[6]: https://docs.datadoghq.com/infrastructure/process/ +[7]: https://docs.datadoghq.com/network_performance_monitoring/ +[8]: https://docs.datadoghq.com/security_platform/cloud_workload_security/getting_started/ +[9]: https://docs.datadoghq.com/network_performance_monitoring/installation/?tab=agent#setup +[10]: https://docs.datadoghq.com/agent/guide/agent-commands/#restart-the-agent +[11]: https://app.datadoghq.com/help/agent_fix +[12]: https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#playbook-keywords +[13]: https://github.com/DataDog/ansible-datadog/blob/main/tasks/agent-linux.yml +[14]: https://github.com/DataDog/ansible-datadog/blob/main/tasks/agent-win.yml +[15]: https://www.datadoghq.com/blog/datadog-marketplace/ +[16]: https://github.com/ansible/ansible/blob/stable-2.9/changelogs/CHANGELOG-v2.9.rst#id61 +[17]: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=configurationfiles#enabling-universal-service-monitoring \ No newline at end of file diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/downgrade_to_5.yaml b/ansible/01_old/roles/datadog.datadog/ci_test/downgrade_to_5.yaml new file mode 100644 index 0000000..40642e8 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/downgrade_to_5.yaml @@ -0,0 +1,25 @@ +--- + +- hosts: all + roles: + - { role: 
'/root/project/'} + vars: + datadog_api_key: "11111111111111111111111111111111" + datadog_agent_major_version: 5 + datadog_agent_version: "{{ '1:5.32.9-1' if ansible_facts.os_family in ['RedHat', 'Rocky', 'AlmaLinux'] else '1:5.32.8-1' }}" + datadog_agent_allow_downgrade: yes + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + datadog_checks: + process: + init_config: + instances: + - name: agent + search_string: ['agent'] diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_5.yaml b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_5.yaml new file mode 100644 index 0000000..94362b9 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_5.yaml @@ -0,0 +1,25 @@ +--- + +- hosts: all + roles: + - { role: '/root/project/'} + vars: + datadog_api_key: "11111111111111111111111111111111" + datadog_agent_major_version: 5 + # avoid checking that the agent is stopped for centos + datadog_skip_running_check: true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + datadog_checks: + process: + init_config: + instances: + - name: agent + search_string: ['agent' ] diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6.yaml b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6.yaml new file mode 100644 index 0000000..b97ea1d --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6.yaml @@ -0,0 +1,32 @@ +--- + +- hosts: all + roles: + - { role: '/root/project/'} + vars: + datadog_api_key: "11111111111111111111111111111111" + datadog_enabled: false + datadog_agent_major_version: 6 + # avoid checking that the agent is stopped for centos + datadog_skip_running_check: 
true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + system_probe_config: + sysprobe_socket: /opt/datadog-agent/run/sysprobe.sock + network_config: + enabled: true + service_monitoring_config: + enabled: true + datadog_checks: + process: + init_config: + instances: + - name: agent + search_string: ['agent', 'sshd' ] diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6_macos.yaml b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6_macos.yaml new file mode 100644 index 0000000..3eab182 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_6_macos.yaml @@ -0,0 +1,26 @@ +--- + +- hosts: all + roles: + - { role: '/Users/distiller/project/' } + vars: + datadog_api_key: "11111111111111111111111111111111" + datadog_enabled: false + datadog_agent_major_version: 6 + # avoid checking that the agent is stopped for centos + datadog_skip_running_check: true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + datadog_checks: + process: + init_config: + instances: + - name: agent + search_string: ['agent', 'sshd' ] diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7.yaml b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7.yaml new file mode 100644 index 0000000..5cba622 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7.yaml @@ -0,0 +1,34 @@ +--- + +- hosts: all + roles: + - { role: '/root/project/'} + vars: + datadog_api_key: "11111111111111111111111111111111" + datadog_enabled: false + datadog_agent_major_version: 7 + # avoid checking that the agent is stopped for centos + datadog_skip_running_check: true + datadog_config: + tags: "mytag0, 
mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + system_probe_config: + sysprobe_socket: /opt/datadog-agent/run/sysprobe.sock + network_config: + enabled: true + service_monitoring_config: + enabled: true + runtime_security_config: + enabled: true + datadog_checks: + process: + init_config: + instances: + - name: agent + search_string: ['agent', 'sshd' ] diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7_macos.yaml b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7_macos.yaml new file mode 100644 index 0000000..a97304c --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/install_agent_7_macos.yaml @@ -0,0 +1,28 @@ +--- + +- hosts: all + roles: + - { role: '/Users/distiller/project/' } + vars: + datadog_api_key: "11111111111111111111111111111111" + datadog_enabled: false + datadog_agent_major_version: 7 + # avoid checking that the agent is stopped for centos + datadog_skip_running_check: true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + runtime_security_config: + enabled: true + datadog_checks: + process: + init_config: + instances: + - name: agent + search_string: ['agent', 'sshd' ] diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci.ini b/ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci.ini new file mode 100644 index 0000000..ff8e2e7 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci.ini @@ -0,0 +1,2 @@ +[test] +127.0.0.1 ansible_connection=local diff --git a/ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci_macos.ini b/ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci_macos.ini new file mode 100644 index 0000000..a0d6733 --- /dev/null +++ 
b/ansible/01_old/roles/datadog.datadog/ci_test/inventory/ci_macos.ini @@ -0,0 +1,2 @@ +[test] +127.0.0.1 ansible_connection=local ansible_user=distiller diff --git a/ansible/01_old/roles/datadog.datadog/defaults/main.yml b/ansible/01_old/roles/datadog.datadog/defaults/main.yml new file mode 100644 index 0000000..7300734 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/defaults/main.yml @@ -0,0 +1,221 @@ +--- +role_version: 4.19.0 + +# define if the datadog-agent services should be enabled +datadog_enabled: yes + +# Whether the datadog.conf / datadog.yaml, system-probe.yaml, security-agent.yaml and checks config under conf.d are managed by Ansible +datadog_manage_config: yes + +# default datadog.conf / datadog.yaml options +datadog_config: {} + +# default system-probe.yaml options +system_probe_config: {} +network_config: {} +service_monitoring_config: {} + +# default checks enabled +datadog_checks: {} + +# custom Python checks +datadog_custom_checks: {} + +# set this to `true` to delete untracked checks +datadog_disable_untracked_checks: false + +# Add additional checks to keep when `datadog_disable_untracked_checks` is set to `true` +datadog_additional_checks: [] + +# set this to `true` to delete default checks +datadog_disable_default_checks: false + +# default user/group +datadog_user: dd-agent +datadog_group: dd-agent + +# agent integration variables +integration_command_user_linux: "dd-agent" +integration_command_user_windows: "administrator" +integration_command_user_macos: "dd-agent" +datadog_agent_binary_path_linux: /opt/datadog-agent/bin/agent/agent +datadog_agent_binary_path_windows: "C:\\Program Files\\Datadog\\Datadog Agent\\bin\\agent.exe" +datadog_agent_binary_path_macos: "/opt/datadog-agent/bin/agent/agent" + +# list of additional groups for datadog_user +datadog_additional_groups: {} + +# Major version of the Agent that will be installed. +# Possible values: 5, 6, 7 +# By default, version 7 will be installed. 
+# If datadog_agent_version is defined, the major version will be deduced from it. +datadog_agent_major_version: "" + +# Pin agent to a version. Highly recommended. +# Defaults to the latest version of the major version chosen in datadog_agent_major_version +# If both datadog_agent_major_version and datadog_agent_version are set, they must be +# compatible (ie. the major version in datadog_agent_version must be datadog_agent_major_version) +datadog_agent_version: "" + +# Default Package name for APT and RPM installs - can override in playbook for IOT Agent +datadog_agent_flavor: "datadog-agent" + +# Default apt repo and keyserver + +# By default, the role uses the official apt Datadog repository for the chosen major version +# Use the datadog_apt_repo variable to override the repository used. +datadog_apt_repo: "" + +datadog_apt_cache_valid_time: 3600 +datadog_apt_key_retries: 5 + +# DATADOG_RPM_KEY.public (4172A230) is only useful to install old (< 6.14) Agent packages. +# We no longer add it and we explicitly remove it. +datadog_rpm_remove_keys: [4172A230] + +# Default yum repo and keys + +# By default, the role uses the official yum Datadog repository for the chosen major version +# Use the datadog_yum_repo variable to override the repository used. 
+datadog_yum_repo: "" + +datadog_yum_repo_gpgcheck: "" +datadog_yum_gpgcheck: yes +# NOTE: we don't use URLs starting with https://keys.datadoghq.com/, as Python +# on older CentOS/RHEL/SUSE doesn't support SNI and get_url would fail on them + +# the CURRENT key always contains the key that is used to sign repodata and latest packages +datadog_yum_gpgkey_current: "https://s3.amazonaws.com/public-signing-keys/DATADOG_RPM_KEY_CURRENT.public" +# this key expires in 2022 +datadog_yum_gpgkey_e09422b3: "https://s3.amazonaws.com/public-signing-keys/DATADOG_RPM_KEY_E09422B3.public" +datadog_yum_gpgkey_e09422b3_sha256sum: "694a2ffecff85326cc08e5f1a619937999a5913171e42f166e13ec802c812085" +# this key expires in 2024 +datadog_yum_gpgkey_20200908: "https://s3.amazonaws.com/public-signing-keys/DATADOG_RPM_KEY_FD4BF915.public" +datadog_yum_gpgkey_20200908_sha256sum: "4d16c598d3635086762bd086074140d947370077607db6d6395b8523d5c23a7d" +# Default zypper repo and keys + +# By default, we fail early & print a helpful message if an older Ansible version and Python 3 +# interpreter is used on CentOS < 8. The 'yum' module is only available on Python 2, and the 'python3-dnf' +# package is not available before CentOS 8. +# If set to true, this option removes this check and allows the install to proceed. Useful in specific setups +# where an old CentOS host using a Python 3 interpreter does have 'dnf' (eg. through backports). +datadog_ignore_old_centos_python3_error: false + +# By default, the role uses the official zypper Datadog repository for the chosen major version +# Use the datadog_zypper_repo variable to override the repository used. 
+datadog_zypper_repo: "" + +# Define if the official zypper Datadog repository services should be installed +datadog_manage_zypper_repofile: yes + +datadog_zypper_repo_gpgcheck: "" +datadog_zypper_gpgcheck: yes +datadog_zypper_gpgkey_current: "https://s3.amazonaws.com/public-signing-keys/DATADOG_RPM_KEY_CURRENT.public" +datadog_zypper_gpgkey_e09422b3: "https://s3.amazonaws.com/public-signing-keys/DATADOG_RPM_KEY_E09422B3.public" +datadog_zypper_gpgkey_e09422b3_sha256sum: "694a2ffecff85326cc08e5f1a619937999a5913171e42f166e13ec802c812085" +datadog_zypper_gpgkey_20200908: "https://s3.amazonaws.com/public-signing-keys/DATADOG_RPM_KEY_FD4BF915.public" +datadog_zypper_gpgkey_20200908_sha256sum: "4d16c598d3635086762bd086074140d947370077607db6d6395b8523d5c23a7d" + +# Avoid checking if the agent is running or not. This can be useful if you're +# using sysvinit and providing your own init script. +datadog_skip_running_check: false + +# Set this to `yes` to allow agent downgrades on apt-based platforms. +# Internally, this uses `apt-get`'s `--force-yes` option. Use with caution. +# On centos this will only work with ansible 2.4 and up +datadog_agent_allow_downgrade: no + +# Default windows latest msi package URL + +# By default, will use the official latest msi package for the chosen major version. +# Use the datadog_windows_download_url option to override the msi package used. +datadog_windows_download_url: "" + +# The default msi package for each major Agent version is specified in the following variables. +# These variables are for internal use only, do not modify them. 
+datadog_windows_agent6_latest_url: "https://s3.amazonaws.com/ddagent-windows-stable/datadog-agent-6-latest.amd64.msi" +datadog_windows_agent7_latest_url: "https://s3.amazonaws.com/ddagent-windows-stable/datadog-agent-7-latest.amd64.msi" + +# If datadog_agent_version is set, the role will use the following url prefix instead, and append the version number to it +# in order to get the full url to the msi package. +datadog_windows_versioned_url: "https://s3.amazonaws.com/ddagent-windows-stable/ddagent-cli" + +# url of the 6.14 fix script. See https://dtdg.co/win-614-fix for more details. +datadog_windows_614_fix_script_url: "https://s3.amazonaws.com/ddagent-windows-stable/scripts/fix_6_14.ps1" +# whether or not to download and apply the above fix +datadog_apply_windows_614_fix: true + +# Override to change the name of the windows user to create +datadog_windows_ddagentuser_name: "" +# Override to change the password of the created windows user. +datadog_windows_ddagentuser_password: "" + +# Override to change the binary installation directory (instead of default c:\program files\datadog\datadog agent) +datadog_windows_program_files_dir: "" + +# Override to change the root of the configuration directory +datadog_windows_config_files_dir: "" + +# Default configuration root. Do not modify +datadog_windows_config_root: "{{ ansible_facts.env['ProgramData'] }}\\Datadog" + +# do not modify. Default empty value for constructing the list of optional +# arguments to supply to the windows installer. +win_install_args: " " + + +# +# Internal variables +# The following variables are for internal use only, do not modify them. 
+# + +datadog_apt_trusted_d_keyring: "/etc/apt/trusted.gpg.d/datadog-archive-keyring.gpg" +datadog_apt_usr_share_keyring: "/usr/share/keyrings/datadog-archive-keyring.gpg" +datadog_apt_key_current_name: "DATADOG_APT_KEY_CURRENT" +# NOTE: we don't use URLs starting with https://keys.datadoghq.com/, as Python +# on older Debian/Ubuntu doesn't support SNI and get_url would fail on them +datadog_apt_default_keys: + - key: "{{ datadog_apt_key_current_name }}" + value: https://s3.amazonaws.com/public-signing-keys/DATADOG_APT_KEY_CURRENT.public + - key: A2923DFF56EDA6E76E55E492D3A80E30382E94DE + value: https://s3.amazonaws.com/public-signing-keys/DATADOG_APT_KEY_382E94DE.public + - key: D75CEA17048B9ACBF186794B32637D44F14F620E + value: https://s3.amazonaws.com/public-signing-keys/DATADOG_APT_KEY_F14F620E.public + +# The default apt repository for each major Agent version is specified in the following variables. +datadog_agent5_apt_repo: "deb [signed-by={{ datadog_apt_usr_share_keyring }}] https://apt.datadoghq.com/ stable main" +datadog_agent6_apt_repo: "deb [signed-by={{ datadog_apt_usr_share_keyring }}] https://apt.datadoghq.com/ stable 6" +datadog_agent7_apt_repo: "deb [signed-by={{ datadog_apt_usr_share_keyring }}] https://apt.datadoghq.com/ stable 7" + +# The default yum repository for each major Agent version is specified in the following variables. +datadog_agent5_yum_repo: "https://yum.datadoghq.com/rpm/{{ ansible_facts.architecture }}" +datadog_agent6_yum_repo: "https://yum.datadoghq.com/stable/6/{{ ansible_facts.architecture }}" +datadog_agent7_yum_repo: "https://yum.datadoghq.com/stable/7/{{ ansible_facts.architecture }}" + +# The default zypper repository for each major Agent version is specified in the following variables. 
+datadog_agent5_zypper_repo: "https://yum.datadoghq.com/suse/rpm/{{ ansible_facts.architecture }}" +datadog_agent6_zypper_repo: "https://yum.datadoghq.com/suse/stable/6/{{ ansible_facts.architecture }}" +datadog_agent7_zypper_repo: "https://yum.datadoghq.com/suse/stable/7/{{ ansible_facts.architecture }}" + +# Default macOS latest dmg package URL + +# By default, will use the official latest dmg package for the chosen major version. +# Use the datadog_macos_download_url option to override the dmg package used. +datadog_macos_download_url: "" + +# The default dmg package for each major Agent version is specified in the following variables. +# These variables are for internal use only, do not modify them. +datadog_macos_agent6_latest_url: "https://s3.amazonaws.com/dd-agent/datadog-agent-6-latest.dmg" +datadog_macos_agent7_latest_url: "https://s3.amazonaws.com/dd-agent/datadog-agent-7-latest.dmg" + +# If datadog_agent_version is set, the role will use the following url prefix instead, and append the version number to it +# in order to get the full url to the dmg package. 
+datadog_macos_versioned_url: "https://s3.amazonaws.com/dd-agent/datadog-agent" + +datadog_macos_user: "{{ ansible_user }}" +datadog_macos_service_name: "com.datadoghq.agent" +datadog_macos_user_plist_file_path: "Library/LaunchAgents/{{ datadog_macos_service_name }}.plist" +datadog_macos_system_plist_file_path: "/Library/LaunchDaemons/{{ datadog_macos_service_name }}.plist" +datadog_macos_etc_dir: "/opt/datadog-agent/etc" +datadog_macos_logs_dir: "/opt/datadog-agent/logs" +datadog_macos_run_dir: "/opt/datadog-agent/run" diff --git a/ansible/01_old/roles/datadog.datadog/handlers/main-macos.yml b/ansible/01_old/roles/datadog.datadog/handlers/main-macos.yml new file mode 100644 index 0000000..1bc1c7c --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/handlers/main-macos.yml @@ -0,0 +1,27 @@ +--- +# This file doesn't actually contain "handlers" in the Ansible sense: when running +# our role, Ansible only loads the contents of handlers/main.yml as handlers. +# However, this is here because this is a "handler-like" task that is dynamically +# included by a handler task in handlers/main.yml. + +# NOTE: We don't use bootout/bootstrap here, because bootout can't wait for the operation +# to finish and if it's in progress for a longer time, bootstrap fails. We use the old +# unload/load combo because they actually wait. 
+- name: Unload datadog-agent service + command: "launchctl unload -wF {{ datadog_macos_system_plist_file_path }}" + become: yes + check_mode: no + when: datadog_enabled and not ansible_check_mode and ansible_facts.os_family == "Darwin" + +- name: Load datadog-agent service + command: "launchctl load -wF {{ datadog_macos_system_plist_file_path }}" + become: yes + check_mode: no + when: datadog_enabled and not ansible_check_mode and ansible_facts.os_family == "Darwin" + +- name: Restart datadog-agent service + command: "launchctl kickstart -k system/{{ datadog_macos_service_name }}" + become: yes + register: command_result + check_mode: no + when: datadog_enabled and not ansible_check_mode and ansible_facts.os_family == "Darwin" diff --git a/ansible/01_old/roles/datadog.datadog/handlers/main-win.yml b/ansible/01_old/roles/datadog.datadog/handlers/main-win.yml new file mode 100644 index 0000000..146b01b --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/handlers/main-win.yml @@ -0,0 +1,11 @@ +--- +# This file doesn't actually contain "handlers" in the Ansible sense: when running +# our role, Ansible only loads the contents of handlers/main.yml as handlers. +# However, this is here because this is a "handler-like" task that is dynamically +# included by a handler task in handlers/main.yml. 
+- name: Restart Windows datadogagent service + win_service: + name: datadogagent + state: restarted + force_dependent_services: true + when: datadog_enabled and not ansible_check_mode and ansible_facts.os_family == "Windows" diff --git a/ansible/01_old/roles/datadog.datadog/handlers/main.yml b/ansible/01_old/roles/datadog.datadog/handlers/main.yml new file mode 100644 index 0000000..54b0b31 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/handlers/main.yml @@ -0,0 +1,31 @@ +--- + +- name: restart datadog-agent-sysprobe + service: + name: datadog-agent-sysprobe + state: restarted + when: datadog_enabled and datadog_sysprobe_enabled and not ansible_check_mode and not ansible_facts.os_family == "Windows" and not ansible_facts.os_family == "Darwin" + +- name: restart datadog-agent + service: + name: datadog-agent + state: restarted + when: datadog_enabled and not ansible_check_mode and not ansible_facts.os_family == "Windows" and not ansible_facts.os_family == "Darwin" + +# We can't add the Windows Agent service restart handler directly here because that makes the role require +# the ansible.windows collection on all platforms. We only want it to be needed on Windows. +# Therefore, what we do is the following: when needed, our Windows tasks call this handler to require a +# Windows Agent restart (through notify: restart datadog-agent-win). +# When notified, the below handler is executed at the end of the playbook run. +# The include_tasks loads the handlers/main-win.yml file, which contains the real service restart task +# (which depends on ansible.windows), and runs it, triggering the Windows Agent restart. +- name: restart datadog-agent-win + include_tasks: handlers/main-win.yml + +# When needed, our macOS tasks call this handler to require a +# macOS Agent restart (through notify: restart datadog-agent-macos). +# When notified, the below handler is executed at the end of the playbook run. 
+# The include_tasks loads the handlers/main-macos.yml file, which contains the real service restart task +# and runs it, triggering the macOS Agent restart. +- name: restart datadog-agent-macos + include_tasks: handlers/main-macos.yml diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/.gitignore b/ansible/01_old/roles/datadog.datadog/manual_tests/.gitignore new file mode 100644 index 0000000..8000dd9 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/Vagrantfile b/ansible/01_old/roles/datadog.datadog/manual_tests/Vagrantfile new file mode 100644 index 0000000..7b8a8f3 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/Vagrantfile @@ -0,0 +1,19 @@ +Vagrant.configure("2") do |config| + config.vm.define "ubuntu", primary: true do |c| + c.vm.box = "ubuntu/trusty64" + end + + config.vm.define "centos", autostart: false do |c| + c.vm.box = "centos/8" + end + + config.vm.define "amazonlinux", autostart: false do |c| + c.vm.box = "bento/amazonlinux-2" + end + + Dir["test_*.yml"].sort.each do |playbook| + config.vm.provision playbook, type: "ansible" do |ansible| + ansible.playbook = playbook + end + end +end diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/inventory b/ansible/01_old/roles/datadog.datadog/manual_tests/inventory new file mode 100644 index 0000000..bbed330 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/inventory @@ -0,0 +1,2 @@ +[test_host] +127.0.0.1 ansible_ssh_host=localhost ansible_ssh_user=vagrant ansible_ssh_port=2222 ansible_ssh_private_key_file=./ansible-datadog/manual_tests/.vagrant/machines/ubuntu/virtualbox/private_key diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/inventory_win b/ansible/01_old/roles/datadog.datadog/manual_tests/inventory_win new file mode 100644 index 0000000..0921413 --- /dev/null +++ 
b/ansible/01_old/roles/datadog.datadog/manual_tests/inventory_win @@ -0,0 +1,9 @@ +[windows] +win ansible_host=127.0.0.1 + +[windows:vars] +ansible_user=Administrator +ansible_become=no +ansible_connection=winrm +ansible_port=5986 +ansible_winrm_server_cert_validation=ignore diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/readme.md b/ansible/01_old/roles/datadog.datadog/manual_tests/readme.md new file mode 100644 index 0000000..5258de7 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/readme.md @@ -0,0 +1,43 @@ +# Linux test setup with Vagrant + +This is an example setup, based on vagrant + virtualbox, that allows to easily run ansible commands to test the module. + +## Requirements + +- vagrant > 2.0.0 +- virtualbox > 5.1.28 + +## Setup + +From `ansible-datadog/manual_tests` directory: + +- provision VM: `vagrant up ubuntu --provision --provision-with test_7_full.yml` +- when done, destroy VM if needed: `vagrant destroy -f` + +To test with different agent versions or configurations, replace +`--provision-with` argument `test_7_full.yml` with any of the other +`test_*.yml` files in this directory. + +To test on different operating systems, replace `ubuntu` with `centos` or `amazonlinux`. + +If `vagrant up --provision` is used without any other parameters, all the +playbooks are applied one by one on an Ubuntu machine. + +# Windows test setup from WSL + +## Requirements + +- Install Ansible and `pywinrm` inside WSL: `sudo python3 -m pip install ansible pywinrm` +- From an elevated Powershell terminal (outside WSL), run the following script to setup WinRM so Ansible can connect: +https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1 +- Make sure the Administrator account is enabled and you know the password (or use a different account in the `inventory_win` file). 
+ +## Setup + +- From `ansible-datadog`'s parent directory, run in a WSL console (it will ask for the Administrator password each time): + +```shell +ansible-playbook -k ansible-datadog/manual_tests/test_7_full.yml -i ansible-datadog/manual_tests/inventory_win +``` + +Note: Replace `test_7_full.yml` with any of the other yaml files on this directory. diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/test_5_default.yml b/ansible/01_old/roles/datadog.datadog/manual_tests/test_5_default.yml new file mode 100644 index 0000000..0a9b57c --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/test_5_default.yml @@ -0,0 +1,6 @@ +--- +- hosts: all + roles: + - { role: ../../ansible-datadog, become: yes } + vars: + datadog_agent_major_version: 5 diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/test_5_full.yml b/ansible/01_old/roles/datadog.datadog/manual_tests/test_5_full.yml new file mode 100644 index 0000000..8fee143 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/test_5_full.yml @@ -0,0 +1,47 @@ +- hosts: all + roles: + - { role: ../../ansible-datadog, become: yes } # On Ansible < 1.9, use `sudo: yes` instead of `become: yes` + vars: + datadog_agent_major_version: 5 + datadog_api_key: "123456" + datadog_agent_version: "1:5.18.0-1" # for apt-based platforms, use a `5.12.3-1` format on yum-based platforms + datadog_agent_allow_downgrade: true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + datadog_checks: + process: + init_config: + instances: + - name: ssh + search_string: ['ssh', 'sshd' ] + - name: syslog + search_string: ['rsyslog' ] + cpu_check_interval: 0.2 + exact_match: true + ignore_denied_access: true + ssh_check: + init_config: + instances: + - host: localhost + port: 22 + username: root + password: changeme + sftp_check: True + 
private_key_file: + add_missing_keys: True + nginx: + init_config: + instances: + - nginx_status_url: http://example.com/nginx_status/ + tags: + - instance:foo + - nginx_status_url: http://example2.com:1234/nginx_status/ + tags: + - instance:bar diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/test_6_default.yml b/ansible/01_old/roles/datadog.datadog/manual_tests/test_6_default.yml new file mode 100644 index 0000000..baad2e2 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/test_6_default.yml @@ -0,0 +1,6 @@ +--- +- hosts: all + roles: + - { role: ../../ansible-datadog, become: yes } + vars: + datadog_agent_major_version: 6 diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/test_6_full.yml b/ansible/01_old/roles/datadog.datadog/manual_tests/test_6_full.yml new file mode 100644 index 0000000..66c2f6a --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/test_6_full.yml @@ -0,0 +1,75 @@ +- hosts: all + roles: + - { role: ../../ansible-datadog, become: yes } # On Ansible < 1.9, use `sudo: yes` instead of `become: yes` + vars: + datadog_agent_major_version: 6 + datadog_api_key: "123456" + datadog_agent_allow_downgrade: true + system_probe_config: + source_excludes: + "*": + - 8301 + dest_excludes: + "*": + - 8301 + network_config: + enabled: true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + # logs related config + logs_enabled: true + logset: main + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + datadog_checks: + process: + init_config: + instances: + - name: ssh + search_string: ['ssh', 'sshd' ] + - name: syslog + search_string: ['rsyslog' ] + cpu_check_interval: 0.2 + exact_match: true + ignore_denied_access: true + ssh_check: + init_config: + instances: + - host: localhost + port: 22 + username: root + password: changeme + sftp_check: True + private_key_file: + add_missing_keys: True + 
nginx: + init_config: + instances: + - nginx_status_url: http://example.com/nginx_status/ + tags: + - instance:foo + - nginx_status_url: http://example2.com:1234/nginx_status/ + tags: + - instance:bar + logs: + - type: file + path: /var/log/nginx.log + service: nginx + source: nginx + sourcecategory: webapp + tags: env:prod + - type: tcp + port: 10514 + service: webapp + source: php + sourcecategory: front + log_processing_rules: + - type: exclude_at_match + name: exclude_datadoghq_users + # Regexp can be anything + pattern: User=\w+@datadoghq.com diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/test_7_default.yml b/ansible/01_old/roles/datadog.datadog/manual_tests/test_7_default.yml new file mode 100644 index 0000000..bef90e1 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/test_7_default.yml @@ -0,0 +1,4 @@ +--- +- hosts: all + roles: + - { role: ../../ansible-datadog, become: yes } diff --git a/ansible/01_old/roles/datadog.datadog/manual_tests/test_7_full.yml b/ansible/01_old/roles/datadog.datadog/manual_tests/test_7_full.yml new file mode 100644 index 0000000..b5a0b26 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/manual_tests/test_7_full.yml @@ -0,0 +1,80 @@ +- hosts: all + roles: + - { role: ../../ansible-datadog, become: yes } # On Ansible < 1.9, use `sudo: yes` instead of `become: yes` + vars: + datadog_agent_major_version: 7 + datadog_api_key: "123456" + datadog_agent_allow_downgrade: true + system_probe_config: + source_excludes: + "*": + - 8301 + dest_excludes: + "*": + - 8301 + network_config: + enabled: true + datadog_config: + tags: "mytag0, mytag1" + log_level: INFO + apm_enabled: "true" # has to be set as a string + # logs related config + logs_enabled: true + logset: main + datadog_config_ex: + trace.config: + env: dev + trace.concentrator: + extra_aggregators: version + datadog_integration: + datadog-aqua: + action: 'install' + version: '1.0.0' + third_party: true + datadog_checks: + process: + 
init_config: + instances: + - name: ssh + search_string: ['ssh', 'sshd' ] + - name: syslog + search_string: ['rsyslog' ] + cpu_check_interval: 0.2 + exact_match: true + ignore_denied_access: true + ssh_check: + init_config: + instances: + - host: localhost + port: 22 + username: root + password: changeme + sftp_check: True + private_key_file: + add_missing_keys: True + nginx: + init_config: + instances: + - nginx_status_url: http://example.com/nginx_status/ + tags: + - instance:foo + - nginx_status_url: http://example2.com:1234/nginx_status/ + tags: + - instance:bar + logs: + - type: file + path: /var/log/nginx.log + service: nginx + source: nginx + sourcecategory: webapp + tags: env:prod + - type: tcp + port: 10514 + service: webapp + source: php + sourcecategory: front + log_processing_rules: + - type: exclude_at_match + name: exclude_datadoghq_users + # Regexp can be anything + pattern: User=\w+@datadoghq.com diff --git a/ansible/01_old/roles/datadog.datadog/meta/.galaxy_install_info b/ansible/01_old/roles/datadog.datadog/meta/.galaxy_install_info new file mode 100644 index 0000000..40c5d7c --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Fri Jul 14 08:13:58 2023 +version: 4.19.0 diff --git a/ansible/01_old/roles/datadog.datadog/meta/main.yml b/ansible/01_old/roles/datadog.datadog/meta/main.yml new file mode 100644 index 0000000..b2494e9 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/meta/main.yml @@ -0,0 +1,67 @@ +--- +galaxy_info: + role_name: datadog + namespace: datadog + author: 'Brian Akins, Dustin Brown & Datadog' + description: Install Datadog agent and configure checks + license: Apache2 + min_ansible_version: 2.6 + github_branch: main + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - artful + - bionic + - focal + - name: Debian + versions: + - wheezy + - jessie + - stretch + - buster + - bullseye + - name: EL + versions: + - 8 + - 7 + - 6 + - name: Amazon Linux 
2 + versions: + - any + - name: opensuse + versions: + - 12.1 + - 12.2 + - 12.3 + - 13.1 + - 13.2 + - 42.1 + - 42.2 + - 42.3 + - 15.0 + - 15.1 + - 15.2 + - 15.3 + - name: SLES + versions: + - 11SP3 + - 11SP4 + - 12 + - 12SP1 + - 15 + - name: Windows + versions: + - 2008x64 + - 2008R2 + - 2012 + - 2012R2 + - 2016 + - 2019 + - name: Darwin + versions: + - any + galaxy_tags: + - monitoring +dependencies: [] diff --git a/ansible/01_old/roles/datadog.datadog/tasks/_agent-linux-macos-shared.yml b/ansible/01_old/roles/datadog.datadog/tasks/_agent-linux-macos-shared.yml new file mode 100644 index 0000000..e17b0db --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/_agent-linux-macos-shared.yml @@ -0,0 +1,91 @@ +--- +- name: Create Datadog agent config directory + file: + dest: "{{ _dd_config_dir }}" + state: directory + mode: 0755 + owner: "{{ _dd_user }}" + group: "{{ _dd_group }}" + when: datadog_manage_config + +- name: Create main Datadog agent configuration file + template: + src: datadog.yaml.j2 + dest: "{{ _dd_config_dir }}/datadog.yaml" + mode: 0640 + owner: "{{ _dd_user }}" + group: "{{ _dd_group }}" + when: datadog_manage_config + notify: "{{ _dd_notify_agent }}" + +- name: Register all checks directories present in datadog + find: + paths: "{{ _dd_config_dir }}/conf.d/" + patterns: + - "*.d" + file_type: directory + register: datadog_conf_directories + when: datadog_manage_config and (datadog_disable_untracked_checks or datadog_disable_default_checks) + +- name: Delete checks not present in datadog_tracked_checks + file: + path: "{{ _dd_config_dir }}/conf.d/{{ item }}.d/conf.yaml" + state: absent + loop: "{{ datadog_conf_directories.files | map(attribute='path') | list | map('basename') | list | map('regex_replace', '^(.*).d$', '\\1') | list }}" + when: datadog_manage_config and datadog_disable_untracked_checks and item not in datadog_tracked_checks + notify: "{{ _dd_notify_agent }}" + +- name: Delete all default checks + file: + path: "{{ 
_dd_config_dir }}/conf.d/{{ item }}.d/conf.yaml.default" + state: absent + loop: "{{ datadog_conf_directories.files | map(attribute='path') | list | map('basename') | list | map('regex_replace', '^(.*).d$', '\\1') | list }}" + when: datadog_manage_config and datadog_disable_default_checks and item not in datadog_tracked_checks + notify: "{{ _dd_notify_agent }}" + +- name: Ensure configuration directories are present for each Datadog check + file: + dest: "{{ _dd_config_dir }}/conf.d/{{ item }}.d" + state: directory + owner: "{{ _dd_user }}" + group: "{{ _dd_group }}" + mode: 0755 + with_items: '{{ datadog_checks|list }}' + when: datadog_manage_config + +- name: Create a configuration file for each Datadog check + template: + src: checks.yaml.j2 + dest: "{{ _dd_config_dir }}/conf.d/{{ item }}.d/conf.yaml" + mode: 0640 + owner: "{{ _dd_user }}" + group: "{{ _dd_group }}" + with_items: "{{ datadog_checks|list }}" + when: datadog_manage_config + notify: "{{ _dd_notify_agent }}" + +- name: Remove old configuration file for each Datadog check + file: + dest: "{{ _dd_config_dir }}/conf.d/{{ item }}.yaml" + state: absent + with_items: "{{ datadog_checks|list }}" + when: datadog_manage_config + notify: "{{ _dd_notify_agent }}" + +- name: Create custom check file for each custom check + copy: + src: "{{ datadog_custom_checks[item] }}" + dest: "{{ _dd_config_dir }}/checks.d/{{ item }}.py" + mode: 0755 + owner: "{{ _dd_user }}" + group: "{{ _dd_group }}" + with_items: "{{ datadog_custom_checks|list }}" + notify: "{{ _dd_notify_agent }}" + +- name: Create installation information file + template: + src: install_info.j2 + dest: "{{ _dd_config_dir }}/install_info" + owner: "{{ _dd_user }}" + group: "{{ _dd_group }}" + mode: 0644 diff --git a/ansible/01_old/roles/datadog.datadog/tasks/_apt-key-import.yml b/ansible/01_old/roles/datadog.datadog/tasks/_apt-key-import.yml new file mode 100644 index 0000000..62db22c --- /dev/null +++ 
b/ansible/01_old/roles/datadog.datadog/tasks/_apt-key-import.yml @@ -0,0 +1,90 @@ +# We allow users to specify a file from which to import keys, so we expect +# that to be a binary keyring; at the same time, we have ascii armored +# individual keys at keys.datadoghq.com that we import. The below procedure +# can be called for a URL pointing to a keyring or an ascii armored file +# and extract and import a specific key from it (we specialcase the +# DATADOG_APT_KEY_CURRENT value, which we always expect to be ascii +# armored individual key). + +# NOTE: we use 'noqa risky-shell-pipe' throughout this file, because Debian's +# default shell is /bin/sh which doesn't have a pipefail option and the +# presence of a different shell isn't guaranteed. + +# NOTE: in order to display Ansible's `changed: [hostname]` properly throughout +# tasks in this file, we added `changed_when: false` to a lot of them, even if +# they actually run every time (e.g. importing the CURRENT key). The reason is +# that they operate inside a temporary directory and they don't have a +# permanent effect on the host (nothing will actually change on the host +# whether these tasks run or not) except the last one - the actual import of +# the key to `datadog_apt_usr_share_keyring`. 
+ +- name: "Set local variables for processed key {{ item.key }}" + set_fact: + key_fingerprint: "{{ item.key }}" + keyring_url: "{{ item.value }}" + +- name: "Find out whether key {{ key_fingerprint }} is already imported" + shell: "gpg --no-default-keyring --keyring {{ datadog_apt_usr_share_keyring }} --list-keys --with-fingerprint --with-colons | grep {{ key_fingerprint }}" # noqa risky-shell-pipe + register: key_exists_result + failed_when: false # we expect the command to fail when the key is not found; we never want this task to fail + changed_when: key_exists_result.rc != 0 + when: key_fingerprint != datadog_apt_key_current_name # we always want to import the CURRENT key + +- name: "Set local helper variable for determining key import (when not {{ datadog_apt_key_current_name }})" + set_fact: + key_needs_import: "{{ 'false' if key_exists_result.rc == 0 else 'true' }}" + when: key_fingerprint != datadog_apt_key_current_name + +- name: "Set local helper variable for determining key import (when {{ datadog_apt_key_current_name }})" + set_fact: + key_needs_import: "true" + when: key_fingerprint == datadog_apt_key_current_name + +- name: "Create temporary directory for key manipulation" + tempfile: + state: directory + suffix: keys + register: tempdir + when: key_needs_import + changed_when: false + +- name: "Download {{ keyring_url }} to import key {{ key_fingerprint }}" + get_url: + url: "{{ keyring_url }}" + dest: "{{ tempdir.path }}/{{ key_fingerprint }}" + force: yes + when: key_needs_import + changed_when: false + +# gpg --dearmor called on a binary keyring does nothing +- name: "Ensure downloaded file for {{ key_fingerprint }} is a binary keyring" + shell: "cat {{ tempdir.path }}/{{ key_fingerprint }} | gpg --dearmor > {{ tempdir.path }}/binary.gpg" # noqa risky-shell-pipe + when: key_needs_import + changed_when: false + +- name: "Extract the required key from the binary keyring (when not {{ datadog_apt_key_current_name }})" + shell: "gpg 
--no-default-keyring --keyring {{ tempdir.path }}/binary.gpg --export {{ key_fingerprint }} > {{ tempdir.path }}/single.gpg" + when: key_fingerprint != datadog_apt_key_current_name and key_needs_import + changed_when: false + +- name: "Extract the required key from the binary keyring (when {{ datadog_apt_key_current_name }})" + copy: + src: "{{ tempdir.path }}/binary.gpg" + dest: "{{ tempdir.path }}/single.gpg" + mode: "0600" + remote_src: yes + when: key_fingerprint == datadog_apt_key_current_name and key_needs_import + changed_when: false + +- name: "Import key {{ key_fingerprint }} to {{ datadog_apt_usr_share_keyring }} keyring" + shell: "cat {{ tempdir.path }}/single.gpg | gpg --no-default-keyring --keyring {{ datadog_apt_usr_share_keyring }} --import --batch" # noqa risky-shell-pipe + when: key_needs_import + register: key_import_result + changed_when: '"imported: 1" in key_import_result.stderr' + +- name: "Remove temporary directory for key manipulation" + file: + path: "{{ tempdir.path }}" + state: absent + when: key_needs_import + changed_when: false diff --git a/ansible/01_old/roles/datadog.datadog/tasks/_remove_rpm_keys.yml b/ansible/01_old/roles/datadog.datadog/tasks/_remove_rpm_keys.yml new file mode 100644 index 0000000..e45406b --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/_remove_rpm_keys.yml @@ -0,0 +1,5 @@ +- name: "Ensure GPG key {{ item }} is not present in the RPM db" + rpm_key: + state: absent + key: "{{ item }}" + when: not ansible_check_mode diff --git a/ansible/01_old/roles/datadog.datadog/tasks/agent-linux.yml b/ansible/01_old/roles/datadog.datadog/tasks/agent-linux.yml new file mode 100644 index 0000000..2ce5df2 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/agent-linux.yml @@ -0,0 +1,161 @@ +--- +- name: Populate service facts + service_facts: + +- name: Set before 6/7.40.0 flag + set_fact: + datadog_before_7400: "{{ datadog_major is defined and datadog_minor is defined + and datadog_major | int < 8 and 
datadog_minor | int < 40 }}" + +- name: Set before 6/7.24.1 flag + set_fact: + datadog_before_7241: "{{ datadog_major is defined and datadog_minor is defined and datadog_bugfix is defined + and datadog_major | int < 8 + and (datadog_minor | int < 24 or (datadog_minor | int == 24 and datadog_bugfix | int < 1)) }}" + +- name: Set before 6/7.18.0 flag + set_fact: + datadog_before_7180: "{{ datadog_major is defined and datadog_minor is defined + and datadog_major | int < 8 and datadog_minor | int < 18 }}" + +- name: Add "{{ datadog_user }}" user to additional groups + user: name="{{ datadog_user }}" groups="{{ datadog_additional_groups }}" append=yes + when: datadog_additional_groups | default([], true) | length > 0 + notify: restart datadog-agent + +- name: Include configuration setup tasks + include_tasks: "_agent-linux-macos-shared.yml" + vars: + _dd_config_dir: /etc/datadog-agent + _dd_user: "{{ datadog_user }}" + _dd_group: "{{ datadog_group }}" + _dd_notify_agent: "restart datadog-agent" + +- name: Create system-probe configuration file + template: + src: system-probe.yaml.j2 + dest: /etc/datadog-agent/system-probe.yaml + mode: 0640 + owner: "root" + group: "{{ datadog_group }}" + when: datadog_manage_config + notify: + "{% if datadog_before_7180 %}restart datadog-agent-sysprobe{% else %}restart datadog-agent{% endif %}" + +- name: Set system probe installed + set_fact: + datadog_sysprobe_installed: "{{ ansible_facts.services['datadog-agent-sysprobe'] is defined + or ansible_facts.services['datadog-agent-sysprobe.service'] is defined }}" + when: not datadog_skip_running_check + +# Before 6/7.24.1, system_probe_config controls the system-probe service +# datadog_minor is only defined when a specific Agent version is given +# (see tasks/parse-version.yml) +- name: Set system probe enabled (before 6/7.24.1) + set_fact: + datadog_sysprobe_enabled: "{{ system_probe_config is defined + and 'enabled' in (system_probe_config | default({}, true)) + and 
system_probe_config['enabled'] + and datadog_sysprobe_installed }}" + when: not datadog_skip_running_check + and datadog_before_7241 + +# Since 6/7.24.1, setting enabled: true in network_config is enough to start the system-probe service: +# https://docs.datadoghq.com/network_monitoring/performance/setup/?tab=agent#setup +- name: Set system probe enabled (since 6/7.24.1) + set_fact: + datadog_sysprobe_enabled: "{{ + ((system_probe_config is defined + and 'enabled' in (system_probe_config | default({}, true)) + and system_probe_config['enabled']) + or (network_config is defined + and 'enabled' in (network_config | default({}, true)) + and network_config['enabled'])) + and datadog_sysprobe_installed }}" + when: not datadog_skip_running_check + and (not datadog_before_7241) + +# Since 6/7.40.0, setting enabled: true in service_monitoring_config is enough to start the system-probe service: +# https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=configurationfiles#enabling-universal-service-monitoring +- name: Set system probe enabled (since 6/7.40.0) + set_fact: + datadog_sysprobe_enabled: "{{ + ((system_probe_config is defined + and 'enabled' in (system_probe_config | default({}, true)) + and system_probe_config['enabled']) + or (network_config is defined + and 'enabled' in (network_config | default({}, true)) + and network_config['enabled']) + or (service_monitoring_config is defined + and 'enabled' in (service_monitoring_config | default({}, true)) + and service_monitoring_config['enabled'])) + and datadog_sysprobe_installed }}" + when: not datadog_skip_running_check + and (not datadog_before_7400) + +- name: Ensure datadog-agent is running + service: + name: datadog-agent + state: started + enabled: yes + when: not datadog_skip_running_check and datadog_enabled and not ansible_check_mode + +- name: Ensure datadog-agent-sysprobe is running if enabled and installed + service: + name: datadog-agent-sysprobe + state: started + enabled: yes + when: not 
datadog_skip_running_check and datadog_enabled and not ansible_check_mode and datadog_sysprobe_enabled + +- name: Ensure datadog-agent, datadog-agent-process and datadog-agent-trace are not running + service: + name: "{{ item }}" + state: stopped + enabled: no + when: not datadog_skip_running_check and not datadog_enabled + with_list: + - datadog-agent + - datadog-agent-process + - datadog-agent-trace + +# Stop system-probe manually on Agent versions < 6/7.18, as it was not tied +# to the main Agent service: https://github.com/DataDog/datadog-agent/pull/4883 +- name: Ensure datadog-agent-sysprobe is stopped if disabled or not installed (before 6/7.18.0) + service: + name: datadog-agent-sysprobe + state: stopped + enabled: no + when: not datadog_skip_running_check + and (not datadog_enabled or not datadog_sysprobe_enabled) + and datadog_before_7180 + and datadog_sysprobe_installed + +- name: Ensure datadog-agent-security is not running + service: + name: datadog-agent-security + state: stopped + enabled: no + when: not datadog_skip_running_check and not datadog_enabled + failed_when: false # Since older versions of the Agent don't include the security agent + +- name: Create security-agent configuration file + template: + src: security-agent.yaml.j2 + dest: /etc/datadog-agent/security-agent.yaml + mode: 0640 + owner: "root" + group: "{{ datadog_group }}" + when: datadog_manage_config and (runtime_security_config is defined and runtime_security_config | default({}, true) | length > 0) + notify: + "{% if datadog_before_7180 %}restart datadog-agent-sysprobe{% else %}restart datadog-agent{% endif %}" + +# Templates don't support the "state: absent" argument, so if the file was created in a previous run +# and then runtime_security_config was completely removed, this is the only way to ensure +# we remove the leftover config file. 
+- name: Remove security-agent configuration file if security-agent is no longer configured + file: + path: /etc/datadog-agent/security-agent.yaml + state: absent + when: datadog_manage_config and (runtime_security_config is not defined or runtime_security_config | default({}, true) | length == 0) + notify: + "{% if datadog_before_7180 %}restart datadog-agent-sysprobe{% else %}restart datadog-agent{% endif %}" diff --git a/ansible/01_old/roles/datadog.datadog/tasks/agent-macos.yml b/ansible/01_old/roles/datadog.datadog/tasks/agent-macos.yml new file mode 100644 index 0000000..e9d2ebb --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/agent-macos.yml @@ -0,0 +1,93 @@ +--- +# NOTE: the DMG gets installed as ansible_user, but we then configure it to run +# under datadog_macos_user and remove the user-specific config for ansible_user +- name: Load user data + shell: + cmd: "dscacheutil -q user -a name {{ datadog_macos_user }} | awk 'BEGIN { RS=\"\\n\"; ORS=\" \" } /uid:/ { print \"{ \\\"uid\\\": \" $2\",\" } /gid:/ { print \"\\\"gid\\\": \" $2 \" }\"}'" + executable: /bin/bash + changed_when: false + register: macos_user_output + check_mode: no + +# This task is used to more cleanly format the variable contents.The ABOVE task's shell command returns a JSON +# object as a string but nested in `.stdout`. Ansible has built in behavior that if it receives JSON data as +# a string it will automatically convert it to the corresponding object. This enables us to get multiple values +# out of the ABOVE task preventing us from having to run 2 similar commands. 
+- name: Extract JSON user data as variable object + set_fact: + macos_user_data: "{{ macos_user_output.stdout }}" + +- name: Load user group data + shell: + cmd: "dscacheutil -q group -a gid {{ macos_user_data.gid }} | grep '^name: ' | awk '{ print $2 }'" + register: macos_user_group + changed_when: false + +# If the ansible_user was logged in via GUI during installation, the postinstall package script +# created launchctl service for the user and also a login item + +- name: Find out if user LaunchAgent is running + shell: + cmd: "launchctl print gui/$(id -u)/{{ datadog_macos_service_name }}" + register: user_service_created + changed_when: false + failed_when: false + +- name: Unload and stop user LaunchAgent + shell: + cmd: "launchctl bootout gui/$(id -u)/{{ datadog_macos_service_name }}" + when: user_service_created.rc == 0 + +- name: Remove user login item + command: |- + osascript -e 'tell application "System Events" to if login item "Datadog Agent" exists then delete login item "Datadog Agent"' + when: user_service_created.rc == 0 + +- name: Remove user LaunchAgent plist file + file: + path: "/Users/{{ ansible_user }}/{{ datadog_macos_user_plist_file_path }}" + state: absent + +# We could take the plist file from user LaunchAgent location and just add UID/GID, +# but when the version is pinned and agent is already installed, that file had +# already been removed and won't be recreated and so we won't be able to use it. +# +# The disadvantage of using a template obviously is that if we changed the plist +# file in the .dmg, we would also have to update this. Fortunately this seems +# to basically never happen, so I think it's an acceptable downside. 
+- name: Add system LaunchDaemon plist file + template: + src: com.datadoghq.agent.plist.j2 + dest: "{{ datadog_macos_system_plist_file_path }}" + owner: 0 + group: 0 + mode: 0644 + become: true + notify: restart datadog-agent-macos + vars: + # NOTE: https://developer.apple.com/library/archive/documentation/MacOSX/Conceptual/BPSystemStartup/Chapters/CreatingLaunchdJobs.html + # docs say both UID/GID and UserName/GroupName work, but only UserName/GroupName actually work. + username: "{{ datadog_macos_user }}" + groupname: "{{ macos_user_group.stdout }}" + +- name: Include configuration setup tasks + import_tasks: "_agent-linux-macos-shared.yml" + vars: + _dd_config_dir: "{{ datadog_macos_etc_dir }}" + _dd_user: "{{ macos_user_data.uid }}" + _dd_group: "{{ macos_user_data.gid }}" + _dd_notify_agent: "restart datadog-agent-macos" + become: true + +- name: Set permissions for DataDog Directories + file: + path: "{{ item }}" + owner: "{{ macos_user_data.uid }}" + group: "{{ macos_user_data.gid }}" + recurse: yes + with_items: + - "{{ datadog_macos_etc_dir }}" + - "{{ datadog_macos_logs_dir }}" + - "{{ datadog_macos_run_dir }}" + notify: restart datadog-agent-macos + become: true diff --git a/ansible/01_old/roles/datadog.datadog/tasks/agent-win.yml b/ansible/01_old/roles/datadog.datadog/tasks/agent-win.yml new file mode 100644 index 0000000..761cced --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/agent-win.yml @@ -0,0 +1,103 @@ +--- +- name: Create main Datadog agent configuration file + win_template: + #FIXME: should have permissions set to only be readable by ddagentuser + src: datadog.yaml.j2 + dest: "{{ datadog_windows_config_root }}\\datadog.yaml" + when: datadog_manage_config + notify: restart datadog-agent-win + +- name: Register all checks directories present in datadog + win_find: + paths: "{{ ansible_facts.env['ProgramData'] }}\\Datadog\\conf.d" + patterns: + - "*.d" + file_type: directory + register: datadog_conf_directories + when: 
datadog_manage_config and (datadog_disable_untracked_checks or datadog_disable_default_checks) + +- name: Delete checks not present in datadog_tracked_checks + win_file: + path: "{{ ansible_facts.env['ProgramData'] }}\\Datadog\\conf.d\\{{ item }}.d\\conf.yaml" + state: absent + loop: "{{ datadog_conf_directories.files | map(attribute='path') | list | map('win_basename') | list | map('regex_replace', '^(.*).d$', '\\1') | list }}" + when: datadog_manage_config and datadog_disable_untracked_checks and item not in datadog_tracked_checks + notify: restart datadog-agent-win + +- name: Delete default checks + win_file: + path: "{{ ansible_facts.env['ProgramData'] }}\\Datadog\\conf.d\\{{ item }}.d\\conf.yaml.default" + state: absent + loop: "{{ datadog_conf_directories.files | map(attribute='path') | list | map('win_basename') | list | map('regex_replace', '^(.*).d$', '\\1') | list }}" + when: datadog_manage_config and datadog_disable_default_checks and item not in datadog_tracked_checks + notify: restart datadog-agent-win + +- name: Ensure configuration directories are present for each Datadog check + win_file: + path: "{{ datadog_windows_config_root }}\\conf.d\\{{ item }}.d" + state: directory + with_items: '{{ datadog_checks|list }}' + when: datadog_manage_config + +- name: Create a configuration file for each Datadog check + win_template: + src: checks.yaml.j2 + dest: "{{ datadog_windows_config_root }}\\conf.d\\{{ item }}.d\\conf.yaml" + with_items: "{{ datadog_checks|list }}" + when: datadog_manage_config + notify: restart datadog-agent-win + +- name: Remove old configuration file for each Datadog check + win_file: + path: "{{ datadog_windows_config_root }}\\conf.d\\{{ item }}.yaml" + state: absent + with_items: "{{ datadog_checks|list }}" + when: datadog_manage_config + notify: restart datadog-agent-win + +- name: Create custom check file for each custom check + win_copy: + src: "{{ datadog_custom_checks[item] }}" + dest: "{{ datadog_windows_config_root 
}}\\checks.d\\{{ item }}.py" + with_items: "{{ datadog_custom_checks|list }}" + notify: restart datadog-agent-win + +- name: Ensure datadog-trace-agent and datadog-process-agent are not disabled + win_service: + name: "{{ item }}" + start_mode: manual + when: not datadog_skip_running_check and datadog_enabled and not ansible_check_mode + with_list: + - datadog-trace-agent + - datadog-process-agent + +- name: Create system-probe configuration file + win_template: + src: system-probe.yaml.j2 + dest: "{{ datadog_windows_config_root }}\\system-probe.yaml" + when: datadog_manage_config + notify: restart datadog-agent-win + +- name: Ensure datadog-agent is running + win_service: + name: datadogagent + state: started + start_mode: delayed + when: not datadog_skip_running_check and datadog_enabled and not ansible_check_mode + +- name: Ensure datadog-agent is disabled + win_service: + name: "{{ item }}" + state: stopped + start_mode: disabled + when: not datadog_skip_running_check and not datadog_enabled + with_list: + - datadog-trace-agent + - datadog-process-agent + - datadogagent + +- name: Create installation information file + template: + src: install_info.j2 + dest: "{{ datadog_windows_config_root }}\\install_info" + mode: 0644 diff --git a/ansible/01_old/roles/datadog.datadog/tasks/agent5-linux.yml b/ansible/01_old/roles/datadog.datadog/tasks/agent5-linux.yml new file mode 100644 index 0000000..bf1587b --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/agent5-linux.yml @@ -0,0 +1,77 @@ +--- +- name: (agent5) Create Datadog agent config directory + file: + dest: /etc/dd-agent + state: directory + mode: 0755 + when: datadog_manage_config + +- name: (agent5) Create main Datadog agent configuration file + template: + src: datadog.conf.j2 + dest: /etc/dd-agent/datadog.conf + owner: "{{ datadog_user }}" + group: "{{ datadog_group }}" + mode: 0644 #FIXME: should have permissions set to only be readable by owner + when: datadog_manage_config + notify: restart 
datadog-agent + +- name: (agent5) Ensure datadog-agent is running + service: + name: datadog-agent + state: started + enabled: yes + when: not datadog_skip_running_check and datadog_enabled and not ansible_check_mode + +- name: (agent5) Ensure datadog-agent is not running + service: + name: datadog-agent + state: stopped + enabled: no + when: not datadog_skip_running_check and not datadog_enabled + +- name: Register all checks files present in datadog + find: + paths: /etc/dd-agent/conf.d/ + patterns: + - "*.yaml" + file_type: file + register: datadog_conf_files + when: datadog_manage_config and datadog_disable_untracked_checks + +- name: Register all checks files present in datadog + find: + paths: /etc/dd-agent/conf.d/ + patterns: + - "*.yaml.default" + file_type: file + register: datadog_conf_files_default + when: datadog_manage_config and datadog_disable_default_checks + +- name: Delete checks not present in datadog_tracked_checks + file: + path: "/etc/dd-agent/conf.d/{{ item }}.yaml" + state: absent + loop: "{{ datadog_conf_files.files | map(attribute='path') | list | map('basename') | list | map('regex_replace', '^(.*).yaml$', '\\1') | list }}" + when: datadog_manage_config and datadog_disable_untracked_checks and item not in datadog_tracked_checks + notify: restart datadog-agent + +- name: Delete default checks + file: + path: "/etc/dd-agent/conf.d/{{ item }}.yaml.default" + state: absent + loop: "{{ datadog_conf_files_default.files | map(attribute='path') | list + | map('basename') | list | map('regex_replace', '^(.*).yaml.default$', '\\1') | list }}" + when: datadog_manage_config and datadog_disable_default_checks and item not in datadog_tracked_checks + notify: restart datadog-agent + +- name: (agent5) Create a configuration file for each Datadog check + template: + src: checks.yaml.j2 + dest: "/etc/dd-agent/conf.d/{{ item }}.yaml" + owner: "{{ datadog_user }}" + group: "{{ datadog_group }}" + mode: 0644 #FIXME: should have permissions set to only be 
readable by owner + with_items: "{{ datadog_checks|list }}" + when: datadog_manage_config + notify: restart datadog-agent diff --git a/ansible/01_old/roles/datadog.datadog/tasks/check-removed-config.yml b/ansible/01_old/roles/datadog.datadog/tasks/check-removed-config.yml new file mode 100644 index 0000000..674fdfb --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/check-removed-config.yml @@ -0,0 +1,9 @@ +- name: Ensure datadog_yum_gpgkey is not used + fail: + msg: datadog_yum_gpgkey configuration value was removed. + when: datadog_yum_gpgkey is defined and datadog_yum_gpgkey|length > 0 + +- name: Ensure datadog_zypper_gpgkey is not used + fail: + msg: datadog_zypper_gpgkey configuration value was removed. + when: datadog_zypper_gpgkey is defined and datadog_zypper_gpgkey|length > 0 diff --git a/ansible/01_old/roles/datadog.datadog/tasks/facts-ansible10.yml b/ansible/01_old/roles/datadog.datadog/tasks/facts-ansible10.yml new file mode 100644 index 0000000..cca541c --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/facts-ansible10.yml @@ -0,0 +1,3 @@ +--- +- name: Gather Ansible Facts + ansible.builtin.setup: # If the full prefix isn't specified in Ansible 2.10+, we might end up running `ansible.windows.setup` instead. 
diff --git a/ansible/01_old/roles/datadog.datadog/tasks/facts-ansible9.yml b/ansible/01_old/roles/datadog.datadog/tasks/facts-ansible9.yml new file mode 100644 index 0000000..ec35438 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/facts-ansible9.yml @@ -0,0 +1,3 @@ +--- +- name: Gather Ansible Facts + setup: diff --git a/ansible/01_old/roles/datadog.datadog/tasks/integration.yml b/ansible/01_old/roles/datadog.datadog/tasks/integration.yml new file mode 100644 index 0000000..00e598d --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/integration.yml @@ -0,0 +1,86 @@ +--- +- name: set agent binary path (windows) + set_fact: + datadog_agent_binary_path: "{{ datadog_agent_binary_path_windows }}" + when: ansible_facts.os_family == "Windows" + +- name: set agent binary path (unix) + set_fact: + datadog_agent_binary_path: "{{ datadog_agent_binary_path_linux }}" + when: ansible_facts.os_family != "Windows" and ansible_facts.os_family != "Darwin" + +- name: set agent binary path (macOS) + set_fact: + datadog_agent_binary_path: "{{ datadog_agent_binary_path_macos }}" + when: ansible_facts.os_family == "Darwin" + +- name: set agent user for integration commmand (windows) + set_fact: + integration_command_user: "{{ integration_command_user_windows }}" + when: ansible_facts.os_family == "Windows" + +- name: set agent user for integration commmand (unix) + set_fact: + integration_command_user: "{{ integration_command_user_linux }}" + when: ansible_facts.os_family != "Windows" and ansible_facts.os_family != "Darwin" + +- name: set agent user for integration commmand (macOS) + set_fact: + integration_command_user: "{{ integration_command_user_macos }}" + when: ansible_facts.os_family == "Darwin" + +- name: Validate integrations actions + fail: + msg: "Unkown action '{{ item.value.action }}' for integration command ({{ item.key }}). 
Valid actions are 'install' and 'remove'" + when: item.value.action != "install" and item.value.action != "remove" + loop: "{{ datadog_integration|dict2items }}" + +# Remove Integrations + +- name: Removing integrations (Unix, macOS) + command: + argv: + - "{{ datadog_agent_binary_path }}" + - integration + - remove + - "{{ item.key }}" + become: yes + become_user: "{{ integration_command_user }}" + loop: "{{ datadog_integration|dict2items }}" + when: item.value.action == "remove" and ansible_facts.os_family != "Windows" + +- name: Removing integrations (Windows) + win_command: "\"{{ datadog_agent_binary_path }}\" integration remove {{ item.key }}" + become: yes + become_user: "{{ integration_command_user }}" + loop: "{{ datadog_integration|dict2items }}" + when: item.value.action == "remove" and ansible_facts.os_family == "Windows" + +# Install integrations + +- name: Install pinned version of integrations (Unix) + command: "{{ datadog_agent_binary_path }} integration install {{ third_party }} {{ item.key }}=={{ item.value.version }}" + become: yes + become_user: "{{ integration_command_user }}" + vars: + third_party: "{% if 'third_party' in item.value and item.value.third_party | bool %}--third-party{% endif %}" + loop: "{{ datadog_integration|dict2items }}" + when: item.value.action == "install" and ansible_facts.os_family != "Windows" and ansible_facts.os_family != "Darwin" + +- name: Install pinned version of integrations (Windows) + win_command: "\"{{ datadog_agent_binary_path }}\" integration install {{ third_party }} {{ item.key }}=={{ item.value.version }}" + become: yes + vars: + third_party: "{% if 'third_party' in item.value and item.value.third_party | bool %}--third-party{% endif %}" + become_user: "{{ integration_command_user }}" + loop: "{{ datadog_integration|dict2items }}" + when: item.value.action == "install" and ansible_facts.os_family == "Windows" + +- name: Install pinned version of integrations (macOS) + command: "{{ 
datadog_agent_binary_path }} integration install {{ third_party }} {{ item.key }}=={{ item.value.version }}" + become: yes + become_user: "{{ integration_command_user }}" + vars: + third_party: "{% if 'third_party' in item.value and item.value.third_party | bool %}--third-party{% endif %}" + loop: "{{ datadog_integration|dict2items }}" + when: item.value.action == "install" and ansible_facts.os_family == "Darwin" diff --git a/ansible/01_old/roles/datadog.datadog/tasks/main.yml b/ansible/01_old/roles/datadog.datadog/tasks/main.yml new file mode 100644 index 0000000..1a40ade --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/main.yml @@ -0,0 +1,71 @@ +--- +- name: Include Gather Ansible Facts task on Ansible >= 2.10 + include_tasks: facts-ansible10.yml + when: ansible_version.major >= 2 and ansible_version.minor >= 10 + +- name: Include Gather Ansible Facts task on Ansible < 2.10 + include_tasks: facts-ansible9.yml + when: ansible_version.major == 2 and ansible_version.minor < 10 + +- name: Check if OS is supported + include_tasks: os-check.yml + +- name: Resolve datadog_tracked_checks later to defend against variable presidence issues arising from dynamically included null datadog_checks + include_tasks: sanitize-checks.yml + +# Also sets datadog_skip_install +- name: Set Facts for Datadog Agent Major Version + include_tasks: set-parse-version.yml + +- name: Debian Install Tasks + include_tasks: pkg-debian.yml + when: ansible_facts.os_family == "Debian" and not datadog_skip_install + +- name: Include tasks to remove old GPG keys + include_tasks: "_remove_rpm_keys.yml" + when: ansible_facts.os_family in ["RedHat", "Rocky", "AlmaLinux", "Suse"] + loop: "{{ datadog_rpm_remove_keys }}" + +- name: Include tasks to check removed configuration value usage + include_tasks: check-removed-config.yml + +# Only Ansible >= 3.0 knows that AlmaLinux belongs to "RedHat" family +# (and latest bugfix releases of some 2.X) +# For Rocky it is some 4.X and >= 5.0 +- name: 
RedHat Install Tasks + include_tasks: pkg-redhat.yml + when: ansible_facts.os_family in ["RedHat", "Rocky", "AlmaLinux"] and not datadog_skip_install + +- name: Suse Install Tasks + include_tasks: pkg-suse.yml + when: ansible_facts.os_family == "Suse" and not datadog_skip_install + +# Note we don't check datadog_skip_install variable value for windows here, +# because some tasks in pkg-windows.yml are carried out regardless of its value. +- name: Windows Install Tasks + include_tasks: pkg-windows.yml + when: ansible_facts.os_family == "Windows" + +- name: macOS Install Tasks + include_tasks: pkg-macos.yml + when: ansible_facts.os_family == "Darwin" and not datadog_skip_install + +- name: Linux Configuration Tasks (Agent 5) + include_tasks: agent5-linux.yml + when: datadog_agent_major_version | int == 5 and ansible_facts.os_family != "Windows" and ansible_facts.os_family != "Darwin" + +- name: Linux Configuration Tasks + include_tasks: agent-linux.yml + when: datadog_agent_major_version | int > 5 and ansible_facts.os_family != "Windows" and ansible_facts.os_family != "Darwin" + +- name: Windows Configuration Tasks + include_tasks: agent-win.yml + when: datadog_agent_major_version | int > 5 and ansible_facts.os_family == "Windows" + +- name: macOS Configuration Tasks + include_tasks: agent-macos.yml + when: ansible_facts.os_family == "Darwin" + +- name: Integrations Tasks + include_tasks: integration.yml + when: datadog_integration is defined diff --git a/ansible/01_old/roles/datadog.datadog/tasks/os-check.yml b/ansible/01_old/roles/datadog.datadog/tasks/os-check.yml new file mode 100644 index 0000000..1ec5898 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/os-check.yml @@ -0,0 +1,5 @@ +--- +- name: Fail if OS is not supported + fail: + msg: "The Datadog Ansible role does not support your OS yet. Please email support@datadoghq.com to open a feature request." 
+ when: ansible_facts.os_family not in ["RedHat", "Rocky", "AlmaLinux", "Debian", "Suse", "Windows", "Darwin"] diff --git a/ansible/01_old/roles/datadog.datadog/tasks/parse-version-macos.yml b/ansible/01_old/roles/datadog.datadog/tasks/parse-version-macos.yml new file mode 100644 index 0000000..7ac8c79 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/parse-version-macos.yml @@ -0,0 +1,7 @@ +- name: Get macOS Agent version + shell: "set -o pipefail && {{ datadog_agent_binary_path_macos }} version | grep 'Agent ' | awk '{print $2}'" + register: datadog_version_check_macos + changed_when: false + failed_when: false + check_mode: no + when: ansible_facts.os_family == "Darwin" diff --git a/ansible/01_old/roles/datadog.datadog/tasks/parse-version-windows.yml b/ansible/01_old/roles/datadog.datadog/tasks/parse-version-windows.yml new file mode 100644 index 0000000..684179b --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/parse-version-windows.yml @@ -0,0 +1,18 @@ +# NOTE: This won't work with rc / beta builds. 
+- name: Get Windows Agent version + win_shell: | + $product_name = "Datadog Agent" + $query = "Select Name,IdentifyingNumber,InstallDate,InstallLocation,ProductID,Version FROM Win32_Product where Name like '$product_name%'" + $installs = Get-WmiObject -query $query + + if (!$installs -or ($installs.Count -eq 0) -or ($installs.Count -gt 1)) { + Write-Host "" + } else { + $ddmaj, $ddmin, $ddpatch, $ddbuild = $installs.Version.split(".") + Write-Host "$($ddmaj).$($ddmin).$($ddpatch)" + } + register: datadog_version_check_win + changed_when: false + failed_when: false + check_mode: no + when: ansible_facts.os_family == "Windows" diff --git a/ansible/01_old/roles/datadog.datadog/tasks/parse-version.yml b/ansible/01_old/roles/datadog.datadog/tasks/parse-version.yml new file mode 100644 index 0000000..9560860 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/parse-version.yml @@ -0,0 +1,104 @@ +--- +- name: Parse Agent version + set_fact: + agent_version: "{{ datadog_agent_version | regex_search(regexp, '\\g', '\\g', '\\g', '\\g', '\\g', '\\g') }}" + vars: + regexp: '(?:(?P[0-9]+):)?(?P[0-9]+)\.(?P[0-9]+)\.(?P[0-9]+)(?P(?:~|-)[^0-9\s-]+[^-\s]*)?(?:-(?P[0-9]+))?' 
+ +- name: Set version vars + set_fact: + datadog_epoch: "{{ agent_version.0 | default('', true) | string }}" + datadog_major: "{{ agent_version.1 | default('', true) | string }}" + datadog_minor: "{{ agent_version.2 | default('', true) | string }}" + datadog_bugfix: "{{ agent_version.3 | default('', true) | string }}" + datadog_suffix: "{{ agent_version.4 | default('', true) | string }}" + datadog_release: "{{ agent_version.5 | default('', true) | string }}" + +- name: Fill empty version epoch with default + set_fact: + datadog_epoch: "1" + when: datadog_epoch | length == 0 + +- name: Fill empty version release with default + set_fact: + datadog_release: "1" + when: datadog_release | length == 0 + +- name: Stop play if datadog_agent_version and datadog_agent_major_version are not compatible + fail: + msg: "The provided major version {{ datadog_agent_major_version }} is not compatible with the + version {{ datadog_major }} deduced from datadog_agent_version ({{ datadog_agent_version }}). + Aborting play." 
+ when: datadog_agent_major_version | length > 0 and datadog_major != datadog_agent_major_version + +- name: Set datadog_agent_major_version to deduced value from datadog_agent_version + set_fact: + datadog_agent_major_version: "{{ datadog_major }}" + +- name: Set helper variables + set_fact: + datadog_agent_linux_version: "{{ datadog_epoch }}:{{ datadog_major }}.{{ datadog_minor }}.{{ datadog_bugfix }}{{ datadog_suffix }}-{{ datadog_release }}" + datadog_rpm_version_finding_cmd: "rpm -q --qf '%{EPOCH}:%{VERSION}-%{RELEASE}' {{ datadog_agent_flavor }}" + +- name: Set OS-specific versions + # NOTE: if changing these, make sure the format correspond with values in datadog_version_finding_cmds below + set_fact: + datadog_agent_debian_version: "{{ datadog_agent_linux_version }}" + datadog_agent_redhat_version: "{{ datadog_agent_linux_version }}" + datadog_agent_suse_version: "{{ datadog_agent_linux_version }}" + datadog_agent_windows_version: "{{ datadog_major }}.{{ datadog_minor }}.{{ datadog_bugfix }}{{ datadog_suffix }}" + datadog_agent_macos_version: "{{ datadog_major }}.{{ datadog_minor }}.{{ datadog_bugfix }}{{ datadog_suffix }}" + +- name: Construct commands to find Agent version + set_fact: + datadog_version_finding_cmds: + Debian: "dpkg -s {{ datadog_agent_flavor }} | grep '^Version:' | awk '{print $2}'" + RedHat: "{{ datadog_rpm_version_finding_cmd }}" + Rocky: "{{ datadog_rpm_version_finding_cmd }}" + AlmaLinux: "{{ datadog_rpm_version_finding_cmd }}" + Suse: "{{ datadog_rpm_version_finding_cmd }}" + +- name: Create OS-specific version dict + set_fact: + datadog_agent_os2version: + Debian: "{{ datadog_agent_debian_version }}" + RedHat: "{{ datadog_agent_redhat_version }}" + Rocky: "{{ datadog_agent_redhat_version }}" + AlmaLinux: "{{ datadog_agent_redhat_version }}" + Suse: "{{ datadog_agent_suse_version }}" + Windows: "{{ datadog_agent_windows_version }}" + Darwin: "{{ datadog_agent_macos_version }}" + +- name: Get Linux Agent version + shell: "{{ 
datadog_version_finding_cmds[ansible_facts.os_family] }}" # noqa 305 - Ansible lint thinks we could use command, but we need shell because some of the cmds have pipes + register: datadog_version_check_linux + changed_when: false + failed_when: false + check_mode: no + when: ansible_facts.system is defined and ansible_facts.system == "Linux" + +# The task is win_shell, so if users don't have the "ansible.windows" collection installed, +# parsing the task would fail even if the host is not Windows. By hiding the task inside +# a conditionally included file, we can prevent this. +- name: Include Windows Agent version tasks + include_tasks: parse-version-windows.yml + when: ansible_facts.os_family == "Windows" + +- name: Include macOS Agent version tasks + include_tasks: parse-version-macos.yml + when: ansible_facts.os_family == "Darwin" + +- name: Set skip install flag if version already installed (Linux) + set_fact: + datadog_skip_install: "{{ datadog_version_check_linux.stdout | trim == datadog_agent_os2version[ansible_facts.os_family] }}" + when: ansible_facts.system is defined and ansible_facts.system == "Linux" + +- name: Set skip install flag if version already installed (Windows) + set_fact: + datadog_skip_install: "{{ datadog_version_check_win.stdout | trim == datadog_agent_os2version[ansible_facts.os_family] }}" + when: ansible_facts.os_family == "Windows" + +- name: Set skip install flag if version already installed (macOS) + set_fact: + datadog_skip_install: "{{ datadog_version_check_macos.stdout | trim == datadog_agent_os2version[ansible_facts.os_family] }}" + when: ansible_facts.os_family == "Darwin" diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian.yml new file mode 100644 index 0000000..25e9f1e --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian.yml @@ -0,0 +1,127 @@ +--- +- name: Install apt-transport-https + apt: + update_cache: yes + name: 
apt-transport-https + state: present + when: not ansible_check_mode + +- name: Install gnupg + apt: + update_cache: yes + name: gnupg + state: present + when: not ansible_check_mode + +- name: "Check if {{ datadog_apt_usr_share_keyring }} exists with correct mode" + stat: + path: "{{ datadog_apt_usr_share_keyring }}" + register: apt_keyring_file + +- name: "Ensure {{ datadog_apt_usr_share_keyring }} exists" + file: + path: "{{ datadog_apt_usr_share_keyring }}" + owner: root + group: root + mode: "0644" + state: touch + when: not ansible_check_mode and (not apt_keyring_file.stat.exists or not apt_keyring_file.stat.mode == "0644") + +- name: Install apt keys from default URLs + include_tasks: _apt-key-import.yml + with_items: + "{{ datadog_apt_default_keys }}" + when: datadog_apt_key_url_new is not defined and not ansible_check_mode + +- name: Install apt keys from custom URL + include_tasks: _apt-key-import.yml + with_items: + - key: A2923DFF56EDA6E76E55E492D3A80E30382E94DE + value: "{{ datadog_apt_key_url_new }}" + - key: D75CEA17048B9ACBF186794B32637D44F14F620E + value: "{{ datadog_apt_key_url_new }}" + when: datadog_apt_key_url_new is defined and not ansible_check_mode + +- name: "Ensure {{ datadog_apt_trusted_d_keyring }} exists with same contents as {{ datadog_apt_usr_share_keyring }} for older distro versions" + copy: + src: "{{ datadog_apt_usr_share_keyring }}" + dest: "{{ datadog_apt_trusted_d_keyring }}" + mode: "0644" + remote_src: yes + when: ((ansible_distribution == 'Debian' and ansible_distribution_major_version|int < 9) or (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version|int < 16)) and not ansible_check_mode + +- name: Ensure Datadog non-https repositories and repositories not using signed-by option are deprecated + apt_repository: + repo: "{{ item }}" + state: "absent" + update_cache: yes + with_items: + - "deb http://apt.datadoghq.com/ stable main" + - "deb http://apt.datadoghq.com/ stable 6" + - "deb 
http://apt.datadoghq.com/ stable 7" + - "deb https://apt.datadoghq.com/ stable main" + - "deb https://apt.datadoghq.com/ stable 6" + - "deb https://apt.datadoghq.com/ stable 7" + when: not ansible_check_mode + +- name: Ensure Datadog repository is up-to-date + apt_repository: + filename: "ansible_datadog_{{ item.key }}" + repo: "{{ item.value }}" + state: "{% if item.key == datadog_agent_major_version|int and datadog_apt_repo | length == 0 %}present{% else %}absent{% endif %}" + update_cache: yes + when: (not ansible_check_mode) + with_dict: + 5: '{{ datadog_agent5_apt_repo }}' + 6: '{{ datadog_agent6_apt_repo }}' + 7: '{{ datadog_agent7_apt_repo }}' + +- name: Initialize custom repo file deletion flag to False + set_fact: + datadog_remove_custom_repo_file: "False" + +- name: Check if custom repository file exists + stat: + path: /etc/apt/sources.list.d/ansible_datadog_custom.list + register: datadog_custom_repo_file + +- name: Fetch custom repository file + slurp: + src: /etc/apt/sources.list.d/ansible_datadog_custom.list + register: datadog_custom_repo_file_contents + when: datadog_custom_repo_file.stat.exists + +- name: Flag custom repository file for deletion if different from current repository config + set_fact: + datadog_remove_custom_repo_file: "{{ datadog_repo_file_contents != datadog_apt_repo }}" + vars: + datadog_repo_file_contents: "{{ datadog_custom_repo_file_contents['content'] | b64decode | trim }}" + when: datadog_custom_repo_file.stat.exists + +- name: (Custom) Remove Datadog custom repository file when not set or updated + file: + path: /etc/apt/sources.list.d/ansible_datadog_custom.list + state: absent + when: (datadog_apt_repo | length == 0) or datadog_remove_custom_repo_file and (not ansible_check_mode) + +- name: (Custom) Ensure Datadog repository is up-to-date + apt_repository: + filename: ansible_datadog_custom + repo: "{{ datadog_apt_repo }}" + state: present + update_cache: yes + when: (datadog_apt_repo | length > 0) and (not 
ansible_check_mode) + +- include_tasks: pkg-debian/install-pinned.yml + when: datadog_agent_debian_version is defined + +- include_tasks: pkg-debian/install-latest.yml + when: datadog_agent_debian_version is not defined + +- name: Install latest datadog-signing-keys package + apt: + name: datadog-signing-keys + state: latest # noqa 403 + # we don't use update_cache: yes, as that was just done by the install-pinned/install-latest + register: datadog_signing_keys_install + when: not ansible_check_mode diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-latest.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-latest.yml new file mode 100644 index 0000000..5a7110d --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-latest.yml @@ -0,0 +1,9 @@ +--- +- name: Install latest datadog-agent package + apt: + name: "{{ datadog_agent_flavor }}" + state: latest # noqa 403 + update_cache: yes + cache_valid_time: "{{ datadog_apt_cache_valid_time }}" + register: datadog_agent_install + when: not ansible_check_mode diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-pinned.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-pinned.yml new file mode 100644 index 0000000..323df9a --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-debian/install-pinned.yml @@ -0,0 +1,10 @@ +--- +- name: Install pinned datadog-agent package + apt: + name: "{{ datadog_agent_flavor }}={{ datadog_agent_debian_version }}" + state: present + force: "{{ datadog_agent_allow_downgrade }}" + update_cache: yes + cache_valid_time: "{{ datadog_apt_cache_valid_time }}" + register: datadog_agent_install + when: not ansible_check_mode diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos.yml new file mode 100644 index 0000000..191003f --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos.yml @@ -0,0 +1,86 
@@ +--- +# NOTE: the DMG gets installed as ansible_user, but we then configure it to run +# under datadog_macos_user and remove the user-specific config for ansible_user +- name: Fail if Agent 5 + fail: + msg: "The Datadog ansible role does not currently support Agent 5 on macOS" + when: datadog_agent_major_version|int == 5 + +- name: Check if the macOS user for Agent service exists + command: id -u "{{ datadog_macos_user }}" + register: mac_user_check + changed_when: false + ignore_errors: true + +- name: Fail if the macOS user for Agent service doesn't exist + fail: + msg: "The Datadog ansible role wasn't able to find the user : {{ datadog_macos_user }}" + when: mac_user_check.rc != 0 + +- include_tasks: pkg-macos/macos_agent_latest.yml + when: (not datadog_skip_install) and (datadog_agent_macos_version is not defined) + +- include_tasks: pkg-macos/macos_agent_version.yml + when: (not datadog_skip_install) and (datadog_agent_macos_version is defined) + +- name: Display macOS download URL + debug: + var: dd_download_url + when: not datadog_skip_install + +- name: pre-Delete temporary dmg + file: + path: '/tmp/datadog-agent.dmg' + state: absent + become: yes + when: not datadog_skip_install + +- name: Create temporary datadog install user file + copy: + dest: "/tmp/datadog-install-user" + content: "{{ datadog_macos_user }}" + mode: 0554 + when: (not datadog_skip_install) and (not ansible_check_mode) + +- name: Download macOS datadog agent + get_url: + url: "{{ dd_download_url }}" + dest: '/tmp/datadog-agent.dmg' + mode: 0750 + register: download_dmg_result + when: (not datadog_skip_install) and (not ansible_check_mode) + +- name: Detach agent dmg if already mounted + shell: 'hdiutil detach "/Volumes/datadog_agent" >/dev/null 2>&1 || true' + when: (not datadog_skip_install) and (not ansible_check_mode) + +- name: Attach agent dmg + command: 'hdiutil attach /tmp/datadog-agent.dmg -mountpoint "/Volumes/datadog_agent"' + when: (not datadog_skip_install) and (not 
ansible_check_mode) and (download_dmg_result.status_code == 200) + +- name: Unpack and copy Datadog Agent files + shell: + cmd: '/usr/sbin/installer -pkg "`find "/Volumes/datadog_agent" -name \*.pkg 2>/dev/null`" -target /' + chdir: '/' + become: yes + register: datadog_agent_install + when: (not datadog_skip_install) and (not ansible_check_mode) and (download_dmg_result.status_code == 200) + notify: restart datadog-agent-macos + +- name: Detach mounted dmg + command: 'hdiutil detach "/Volumes/datadog_agent"' + when: (not datadog_skip_install) and (not ansible_check_mode) and (download_dmg_result.status_code == 200) + +- name: Delete temporary dmg + file: + path: "{{ download_dmg_result.dest }}" + state: absent + become: yes + when: (not datadog_skip_install) and (not ansible_check_mode) and (download_dmg_result.status_code == 200) + +- name: Delete temporary datadog install user file + file: + path: "/tmp/datadog-install-user" + state: absent + become: yes + when: (not datadog_skip_install) and (not ansible_check_mode) diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_latest.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_latest.yml new file mode 100644 index 0000000..64ab51e --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_latest.yml @@ -0,0 +1,12 @@ +--- + +- name: Set agent download filename to custom URL + set_fact: + dd_download_url: "{{ datadog_macos_download_url }}" + when: datadog_macos_download_url | default('', true) | length > 0 + +- name: Set agent download filename to latest + set_fact: + dd_download_url: "{% if datadog_agent_major_version|int == 7 %}{{ datadog_macos_agent7_latest_url }} + {% else %}{{ datadog_macos_agent6_latest_url }}{% endif %}" + when: datadog_macos_download_url | default('', true) | length == 0 diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_version.yml 
b/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_version.yml new file mode 100644 index 0000000..2acfbac --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-macos/macos_agent_version.yml @@ -0,0 +1,5 @@ +--- + +- name: Set agent download filename to a specific version + set_fact: + dd_download_url: "{{ datadog_macos_versioned_url }}-{{ datadog_agent_macos_version }}-1.dmg" diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat.yml new file mode 100644 index 0000000..ebd4de6 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat.yml @@ -0,0 +1,169 @@ +--- +- name: Fail early if Python 3 is used on CentOS / RHEL < 8 with old Ansible + fail: + msg: "The installation of the Agent on RedHat family systems using yum is not compatible with Python 3 with older Ansible versions. + To run this role, use a Python 2 interpreter on hosts running CentOS / RHEL < 8 or upgrade Ansible to version 2.11+" + # We can't compare ansible_version.full with 2.11 in the condition below, because ansible's + # `semver` and `strict` version_type don't recognize it as a valid version and the `loose` + # version_type considers it to be a post-release. It seems that the best course of action + # is to explicitly use just major.minor for comparison with 2.11. 
+ # See https://github.com/ansible/ansible/issues/78288 + when: (not datadog_ignore_old_centos_python3_error) + and ("{}.{}".format(ansible_version.major, ansible_version.minor) is version("2.11", operator="lt", strict=True)) + and (ansible_pkg_mgr == "yum") + and (ansible_facts.python.version.major | int >= 3) + +- name: Find out whether to set repo_gpgcheck or not + # We turn off repo_gpgcheck on custom repos and on RHEL/CentOS 8.1 because + # of https://bugzilla.redhat.com/show_bug.cgi?id=1792506 + set_fact: + do_yum_repo_gpgcheck: >- + {{ datadog_yum_repo_gpgcheck if datadog_yum_repo_gpgcheck != '' else ( + 'no' if ( + ansible_facts.distribution_version.startswith('8.1.') or ansible_facts.distribution_version == '8.1' or + datadog_yum_repo != '' + ) else 'yes' + ) }} + +- name: Download current RPM key + get_url: + url: "{{ datadog_yum_gpgkey_current }}" + dest: /tmp/DATADOG_RPM_KEY_CURRENT.public + force: yes + +- name: Import current RPM key + rpm_key: + key: /tmp/DATADOG_RPM_KEY_CURRENT.public + state: present + when: not ansible_check_mode + +- name: Download new RPM key (Expires in 2022) + get_url: + url: "{{ datadog_yum_gpgkey_e09422b3 }}" + dest: /tmp/DATADOG_RPM_KEY_E09422B3.public + checksum: "sha256:{{ datadog_yum_gpgkey_e09422b3_sha256sum }}" + +- name: Import new RPM key (Expires in 2022) + rpm_key: + key: /tmp/DATADOG_RPM_KEY_E09422B3.public + state: present + when: not ansible_check_mode + +- name: Download new RPM key (Expires in 2024) + get_url: + url: "{{ datadog_yum_gpgkey_20200908 }}" + dest: /tmp/DATADOG_RPM_KEY_20200908.public + checksum: "sha256:{{ datadog_yum_gpgkey_20200908_sha256sum }}" + +- name: Import new RPM key (Expires in 2024) + rpm_key: + key: /tmp/DATADOG_RPM_KEY_20200908.public + state: present + when: not ansible_check_mode + +- name: Set versioned includepkgs variable + set_fact: + datadog_includepkgs: "{{ datadog_agent_flavor }}-{{ datadog_agent_redhat_version | regex_replace('^\\d+:', '') }}" + when: 
datadog_agent_redhat_version is defined + +- name: Set plain includepkgs variable + set_fact: + datadog_includepkgs: "{{ datadog_agent_flavor }}" + when: datadog_agent_redhat_version is not defined + +- name: Install Datadog Agent 5 yum repo + yum_repository: + name: datadog + description: Datadog, Inc. + baseurl: "{{ datadog_agent5_yum_repo }}" + enabled: yes + includepkgs: "{{ datadog_includepkgs }}" + repo_gpgcheck: no # we don't sign Agent 5 repodata + gpgcheck: "{{ datadog_yum_gpgcheck }}" + gpgkey: [ + "{{ datadog_yum_gpgkey_current }}", + "{{ datadog_yum_gpgkey_20200908 }}", + "{{ datadog_yum_gpgkey_e09422b3 }}", + ] + register: repofile5 + when: (datadog_agent_major_version|int == 5) and (datadog_yum_repo | length == 0) and (not ansible_check_mode) + +- name: Install Datadog Agent 6 yum repo + yum_repository: + name: datadog + description: Datadog, Inc. + baseurl: "{{ datadog_agent6_yum_repo }}" + enabled: yes + includepkgs: "{{ datadog_includepkgs }}" + repo_gpgcheck: "{{ do_yum_repo_gpgcheck }}" + gpgcheck: "{{ datadog_yum_gpgcheck }}" + gpgkey: [ + "{{ datadog_yum_gpgkey_current }}", + "{{ datadog_yum_gpgkey_20200908 }}", + "{{ datadog_yum_gpgkey_e09422b3 }}", + ] + register: repofile6 + when: (datadog_agent_major_version|int == 6) and (datadog_yum_repo | length == 0) and (not ansible_check_mode) + +- name: Install Datadog Agent 7 yum repo + yum_repository: + name: datadog + description: Datadog, Inc. + baseurl: "{{ datadog_agent7_yum_repo }}" + enabled: yes + includepkgs: "{{ datadog_includepkgs }}" + repo_gpgcheck: "{{ do_yum_repo_gpgcheck }}" + gpgcheck: "{{ datadog_yum_gpgcheck }}" + gpgkey: [ + "{{ datadog_yum_gpgkey_current }}", + "{{ datadog_yum_gpgkey_20200908 }}", + "{{ datadog_yum_gpgkey_e09422b3 }}", + ] + register: repofile7 + when: (datadog_agent_major_version|int == 7) and (datadog_yum_repo | length == 0) and (not ansible_check_mode) + +- name: Install Datadog Custom yum repo + yum_repository: + name: datadog + description: Datadog, Inc. 
+ baseurl: "{{ datadog_yum_repo }}" + enabled: yes + includepkgs: "{{ datadog_includepkgs }}" + repo_gpgcheck: "{{ do_yum_repo_gpgcheck }}" + gpgcheck: "{{ datadog_yum_gpgcheck }}" + gpgkey: [ + "{{ datadog_yum_gpgkey_current }}", + "{{ datadog_yum_gpgkey_20200908 }}", + "{{ datadog_yum_gpgkey_e09422b3 }}", + ] + register: repofilecustom + when: (datadog_yum_repo | length > 0) and (not ansible_check_mode) + +- name: Clean repo metadata if repo changed # noqa 503 + command: yum clean metadata --disablerepo="*" --enablerepo=datadog + failed_when: false # Cleaning the metadata is only needed when downgrading a major version of the Agent, don't fail because of this + args: + warn: no + when: repofile5.changed or repofile6.changed or repofile7.changed or repofilecustom.changed + +# On certain version of dnf, gpg keys aren't imported into the local db with the package install task. +# This rule assures that they are correctly imported into the local db and users won't have to manually accept +# them if running dnf commands on the hosts. 
+- name: Refresh Datadog repository cache # noqa 503 + command: yum -y makecache --disablerepo="*" --enablerepo=datadog + failed_when: false + args: + warn: no + when: repofile5.changed or repofile6.changed or repofile7.changed or repofilecustom.changed + +- name: Remove old yum repo files + yum_repository: + name: "ansible_datadog_{{ item }}" + state: absent + with_items: [ 5, 6, 7, "custom" ] + +- include_tasks: pkg-redhat/install-pinned.yml + when: datadog_agent_redhat_version is defined + +- include_tasks: pkg-redhat/install-latest.yml + when: datadog_agent_redhat_version is not defined diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-latest.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-latest.yml new file mode 100644 index 0000000..90f4a06 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-latest.yml @@ -0,0 +1,18 @@ +--- +- name: Install latest datadog-agent package (dnf) + dnf: + name: "{{ datadog_agent_flavor }}" + update_cache: yes + state: latest # noqa 403 + register: datadog_agent_install + when: not ansible_check_mode and ansible_pkg_mgr == "dnf" + notify: restart datadog-agent + +- name: Install latest datadog-agent package (yum) + yum: + name: "{{ datadog_agent_flavor }}" + update_cache: yes + state: latest # noqa 403 + register: datadog_agent_install + when: not ansible_check_mode and ansible_pkg_mgr == "yum" + notify: restart datadog-agent diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-pinned.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-pinned.yml new file mode 100644 index 0000000..9b9545e --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-redhat/install-pinned.yml @@ -0,0 +1,21 @@ +--- +- name: Install pinned datadog-agent package (dnf) + dnf: + name: "{{ datadog_agent_flavor }}-{{ datadog_agent_redhat_version }}" + update_cache: yes + state: present + allow_downgrade: "{{ datadog_agent_allow_downgrade 
}}" + register: datadog_agent_install + when: not ansible_check_mode and ansible_pkg_mgr == "dnf" + notify: restart datadog-agent + +- name: Install pinned datadog-agent package (yum) + yum: + # We have to add architecture, because yum only understands epoch when architecture is also specified + name: "{{ datadog_agent_flavor }}-{{ datadog_agent_redhat_version }}.{{ ansible_facts.architecture }}" + update_cache: yes + state: present + allow_downgrade: "{{ datadog_agent_allow_downgrade }}" + register: datadog_agent_install + when: not ansible_check_mode and ansible_pkg_mgr == "yum" + notify: restart datadog-agent diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse.yml new file mode 100644 index 0000000..9306c8e --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse.yml @@ -0,0 +1,107 @@ +--- +- name: Find out whether to set repo_gpgcheck or not + set_fact: + do_zypper_repo_gpgcheck: >- + {{ datadog_zypper_repo_gpgcheck if datadog_zypper_repo_gpgcheck != '' else ( + 'yes' if datadog_zypper_repo == '' and datadog_agent_major_version|int != 5 else 'no' + ) }} + +- block: # Work around due to SNI check for SLES11 + - name: Stat if current RPM key already exists + stat: + path: /tmp/DATADOG_RPM_KEY_CURRENT.public + register: ddkeycurrent + - name: Download current RPM key (SLES11) + get_url: + url: "{{ datadog_zypper_gpgkey_current }}" + dest: /tmp/DATADOG_RPM_KEY_CURRENT.public + force: yes + when: not ddkeycurrent.stat.exists + when: ansible_distribution_version|int == 11 + +- name: Download current RPM key + get_url: + url: "{{ datadog_zypper_gpgkey_current }}" + dest: /tmp/DATADOG_RPM_KEY_CURRENT.public + force: yes + when: ansible_distribution_version|int >= 12 + +- name: Import current RPM key + rpm_key: + key: /tmp/DATADOG_RPM_KEY_CURRENT.public + state: present + when: not ansible_check_mode + +- block: # Work around due to SNI check for SLES11 + - name: Stat if E09422B3 key 
(Expires 2022) RPM key already exists + stat: + path: /tmp/DATADOG_RPM_KEY_E09422B3.public + register: ddnewkey + - name: Download E09422B3 key (Expires 2022) RPM key (SLES11) + get_url: + url: "{{ datadog_zypper_gpgkey_e09422b3 }}" + dest: /tmp/DATADOG_RPM_KEY_E09422B3.public + when: not ddnewkey.stat.exists + when: ansible_distribution_version|int == 11 + +- name: Download E09422B3 key (Expires 2022) RPM key + get_url: + url: "{{ datadog_zypper_gpgkey_e09422b3 }}" + dest: /tmp/DATADOG_RPM_KEY_E09422B3.public + checksum: "sha256:{{ datadog_zypper_gpgkey_e09422b3_sha256sum }}" + when: ansible_distribution_version|int >= 12 + +- name: Import E09422B3 key (Expires 2022) RPM key + rpm_key: + key: /tmp/DATADOG_RPM_KEY_E09422B3.public + state: present + when: not ansible_check_mode + +- block: # Work around due to SNI check for SLES11 + - name: Stat if 20200908 key (Expires 2024) RPM key already exists + stat: + path: /tmp/DATADOG_RPM_KEY_20200908.public + register: ddnewkey_20200908 + - name: Download 20200908 key (Expires 2024) RPM key (SLES11) + get_url: + url: "{{ datadog_zypper_gpgkey_20200908 }}" + dest: /tmp/DATADOG_RPM_KEY_20200908.public + when: not ddnewkey_20200908.stat.exists + when: ansible_distribution_version|int == 11 + +- name: Download 20200908 key (Expires 2024) RPM key + get_url: + url: "{{ datadog_zypper_gpgkey_20200908 }}" + dest: /tmp/DATADOG_RPM_KEY_20200908.public + checksum: "sha256:{{ datadog_zypper_gpgkey_20200908_sha256sum }}" + when: ansible_distribution_version|int >= 12 + +- name: Import 20200908 key (Expires 2024) RPM key + rpm_key: + key: /tmp/DATADOG_RPM_KEY_20200908.public + state: present + when: not ansible_check_mode + +# ansible don't allow repo_gpgcheck to be set, we have to create the repo file manually +- name: Install DataDog zypper repo + template: + src: zypper.repo.j2 + dest: /etc/zypp/repos.d/datadog.repo + owner: "root" + group: "root" + mode: 0644 + register: datadog_zypper_repo_template + when: 
datadog_manage_zypper_repofile + +# refresh zypper repos only if the template changed +- name: refresh Datadog zypper_repos # noqa 503 + command: zypper refresh datadog + when: datadog_zypper_repo_template.changed and not ansible_check_mode + args: + warn: false # silence warning about using zypper directly + +- include_tasks: pkg-suse/install-pinned.yml + when: datadog_agent_suse_version is defined + +- include_tasks: pkg-suse/install-latest.yml + when: datadog_agent_suse_version is not defined diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-latest.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-latest.yml new file mode 100644 index 0000000..18b7d0c --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-latest.yml @@ -0,0 +1,8 @@ +--- +- name: Ensure Datadog agent is installed + zypper: + name: datadog-agent + state: latest # noqa 403 + register: datadog_agent_install + when: not ansible_check_mode + notify: restart datadog-agent diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-pinned.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-pinned.yml new file mode 100644 index 0000000..831e505 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-suse/install-pinned.yml @@ -0,0 +1,9 @@ +--- +- name: Install pinned datadog-agent package + zypper: + name: "datadog-agent={{ datadog_agent_suse_version }}" + state: present + oldpackage: "{{ datadog_agent_allow_downgrade }}" + register: datadog_agent_install + when: not ansible_check_mode + notify: restart datadog-agent diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-windows-opts.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-windows-opts.yml new file mode 100644 index 0000000..eee9a7b --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-windows-opts.yml @@ -0,0 +1,92 @@ +- name: Set DD Username Arg + set_fact: + win_install_args: "{{ win_install_args }} DDAGENTUSER_NAME={{ 
datadog_windows_ddagentuser_name }}" + when: datadog_windows_ddagentuser_name | default('', true) | length > 0 + +# NOTE: We don't set DD Password Arg here to prevent it from being printed; +# we set it right before using win_install_args + +# check the registry. On upgrade, the location of the config file root will +# be set here. +- name: Check existing config file Directory + win_reg_stat: + path: HKLM:\SOFTWARE\Datadog\Datadog Agent + name: ConfigRoot + register: config_root_from_registry + +# check the registry. On upgrade, the location of the installation root directory will +# be set here. + +- name: Check existing installPath Directory + win_reg_stat: + path: HKLM:\SOFTWARE\Datadog\Datadog Agent + name: InstallPath + register: install_path_from_registry + +## validate the config path. Only necessary if it's set in the registry alread (i.e. upgrade) +## Will fail the install if the caller has set the config root to a non-standard root, and that +## root is different than what's already present. +- name: Validate config path + fail: + msg: "Incompatible configuration option {{ config_root_from_registry.value }} != {{ datadog_windows_config_files_dir }}" + when: ( (config_root_from_registry.exists) and + (datadog_windows_config_files_dir | length > 0 ) and + (config_root_from_registry.value | regex_replace('\\\\$','') | lower != datadog_windows_config_files_dir | lower ) ) + +- name: Validated config path + debug: + msg: "Allowing configuration option {{ config_root_from_registry.value }} == {{ datadog_windows_config_files_dir }}" + when: ( (config_root_from_registry.exists) and + (datadog_windows_config_files_dir | length > 0 ) and + (config_root_from_registry.value | regex_replace('\\\\$','') | lower == datadog_windows_config_files_dir | lower ) ) + +## validate the binary install path. Only necessary if it's set in the registry alread (i.e. 
upgrade) +## Will fail the install if the caller has set the binary install path to a non-standard root, and that +## root is different than what's already present. +- name: Validate install path + fail: + msg: "Incompatible configuration option {{ install_path_from_registry.value }} != {{ datadog_windows_program_files_dir }}" + when: ( (install_path_from_registry.exists) and + (datadog_windows_program_files_dir | length > 0 ) and + (install_path_from_registry.value | regex_replace('\\\\$','') | lower != datadog_windows_program_files_dir | lower ) ) + +- name: Validated install path + debug: + msg: "Allowing configuration option {{ install_path_from_registry.value }} == {{ datadog_windows_program_files_dir }}" + when: ( (install_path_from_registry.exists) and + (datadog_windows_program_files_dir | length > 0 ) and + (install_path_from_registry.value | regex_replace('\\\\$','') | lower == datadog_windows_program_files_dir | lower ) ) + + +- name: Set Program Files Target Directory + set_fact: + win_install_args: "{{ win_install_args }} PROJECTLOCATION=\"{{ datadog_windows_program_files_dir }}\" " + when: datadog_windows_program_files_dir | length > 0 + +- name: Set Config Files Target Directory + set_fact: + win_install_args: "{{ win_install_args }} APPLICATIONDATADIRECTORY=\"{{ datadog_windows_config_files_dir }}\" " + when: datadog_windows_config_files_dir | length > 0 + +# if the current installation was set to a non-standard config root, and that config root is not +# presented here, then update accordingly, so that any config file modifications will be made +# in the right place +- name: Set config root for config Files + set_fact: + datadog_windows_config_root: "{{ datadog_windows_config_files_dir }}" + when: ((datadog_windows_config_files_dir | length > 0) and (not config_root_from_registry.exists)) + +- name: Set config root for config files from current location + set_fact: + datadog_windows_config_root: "{{ config_root_from_registry.value | 
regex_replace('\\\\$','') }}" + when: config_root_from_registry.exists + +- name: Set Test + set_fact: + win_install_args: "{{ win_install_args }}" + +# Add the installation arguments to install Windows NPM. +- name: Set Windows NPM flag + set_fact: + win_install_args: "{{ win_install_args }} ADDLOCAL=MainApplication,NPM" + when: datadog_sysprobe_enabled diff --git a/ansible/01_old/roles/datadog.datadog/tasks/pkg-windows.yml b/ansible/01_old/roles/datadog.datadog/tasks/pkg-windows.yml new file mode 100644 index 0000000..3ea0a7e --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/pkg-windows.yml @@ -0,0 +1,87 @@ +--- +- name: Fail if Agent 5 + fail: + msg: "The Datadog ansible role does not currently support Agent 5" + when: datadog_agent_major_version|int == 5 + +- name: Download windows datadog agent 614 fix script + win_get_url: + url: "{{ datadog_windows_614_fix_script_url }}" + dest: '%TEMP%\fix_6_14.ps1' + when: not datadog_skip_install and datadog_apply_windows_614_fix + +- name: Run 6.14.0/1 PowerShell fix + win_shell: | + Set-ExecutionPolicy Bypass -Scope Process -Force + &$env:temp\fix_6_14.ps1 + when: not datadog_skip_install and datadog_apply_windows_614_fix + +- include_tasks: win_agent_latest.yml + when: (not datadog_skip_install) and (datadog_agent_windows_version is not defined) + +- include_tasks: win_agent_version.yml + when: (not datadog_skip_install) and (datadog_agent_windows_version is defined) + +- name: show URL var + debug: + var: dd_download_url + when: not datadog_skip_install + +## must be prior to `pkg-windows-opts.yml`, because the variable is used inside +- name: Set windows NPM installed + set_fact: + datadog_sysprobe_enabled: "{{ network_config is defined and 'enabled' in (network_config | default({}, true)) and network_config['enabled'] }}" +- include_tasks: pkg-windows-opts.yml + +- name: pre-Delete temporary msi + win_file: + path: '%TEMP%\ddagent.msi' + state: absent + when: not datadog_skip_install + +- name: Download 
windows datadog agent + win_get_url: + url: "{{ dd_download_url }}" + dest: '%TEMP%\ddagent.msi' + register: download_msi_result + when: (not datadog_skip_install) and (not ansible_check_mode) + +- name: Create Binary directory root (if not default) + win_file: + path: "{{ datadog_windows_program_files_dir }}" + state: directory + when: datadog_windows_program_files_dir | length > 0 + +- name: Set default permissions on binary directory root (if not default) + win_acl: + path: "{{ datadog_windows_program_files_dir }}" + inherit: ContainerInherit,ObjectInherit + user: "BUILTIN\\USERS" + rights: ReadAndExecute + type: allow + state: present + propagation: None + when: datadog_windows_program_files_dir | length > 0 + +- name: Show installation flags + debug: + msg: "{{ win_install_args }}{% if datadog_windows_ddagentuser_password | default('', true) | length > 0 %} DDAGENTUSER_PASSWORD={% endif %}" + +# We set DD Password Arg here to prevent it from being printed in any kind of debug logs/messages prior usage +- name: Set DD Password Arg + set_fact: + win_install_args: "{{ win_install_args }} DDAGENTUSER_PASSWORD={{ datadog_windows_ddagentuser_password }}" + when: datadog_windows_ddagentuser_password | default('', true) | length > 0 + +- name: Install downloaded agent + win_package: + path: "{{ download_msi_result.dest }}" + arguments: "{{ win_install_args }}" + register: datadog_agent_install + when: (not datadog_skip_install) and (not ansible_check_mode) + +- name: Delete temporary msi + win_file: + path: "{{ download_msi_result.dest }}" + state: absent + when: (not datadog_skip_install) and (not ansible_check_mode) and (download_msi_result.status_code == 200) diff --git a/ansible/01_old/roles/datadog.datadog/tasks/sanitize-checks.yml b/ansible/01_old/roles/datadog.datadog/tasks/sanitize-checks.yml new file mode 100644 index 0000000..28c34b7 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/sanitize-checks.yml @@ -0,0 +1,12 @@ +- name: Defend against 
defined but null datadog_checks variable + set_fact: + datadog_checks: "{{ datadog_checks | default({}, true) }}" + +- name: Resolve datadog_tracked_checks + set_fact: + datadog_tracked_checks: "{{ datadog_checks | list + datadog_additional_checks | default([], true) }}" + +- name: Check that datadog_checks is a mapping + assert: + that: + - datadog_checks is mapping diff --git a/ansible/01_old/roles/datadog.datadog/tasks/set-parse-version.yml b/ansible/01_old/roles/datadog.datadog/tasks/set-parse-version.yml new file mode 100644 index 0000000..39219b8 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/set-parse-version.yml @@ -0,0 +1,16 @@ +--- +- name: Convert datadog_agent_major_version to string + set_fact: + datadog_agent_major_version: "{{ datadog_agent_major_version | default('', true) | string }}" + +- name: Initialize skip install flag to false + set_fact: + datadog_skip_install: no + +- include_tasks: parse-version.yml + when: datadog_agent_version | default('', true) | length > 0 + +- name: Set Agent default major version + set_fact: + datadog_agent_major_version: "7" + when: datadog_agent_major_version | length == 0 diff --git a/ansible/01_old/roles/datadog.datadog/tasks/win_agent_latest.yml b/ansible/01_old/roles/datadog.datadog/tasks/win_agent_latest.yml new file mode 100644 index 0000000..282e1fd --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/win_agent_latest.yml @@ -0,0 +1,12 @@ +--- + +- name: (Custom) Set agent download filename to latest + set_fact: + dd_download_url: "{{ datadog_windows_download_url }}" + when: datadog_windows_download_url | default('', true) | length > 0 + +- name: Set agent download filename to latest + set_fact: + dd_download_url: "{% if datadog_agent_major_version|int == 7 %}{{ datadog_windows_agent7_latest_url }} + {% else %}{{ datadog_windows_agent6_latest_url }}{% endif %}" + when: datadog_windows_download_url | default('', true) | length == 0 diff --git 
a/ansible/01_old/roles/datadog.datadog/tasks/win_agent_version.yml b/ansible/01_old/roles/datadog.datadog/tasks/win_agent_version.yml new file mode 100644 index 0000000..2864605 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/tasks/win_agent_version.yml @@ -0,0 +1,10 @@ +--- + +- name: Check agent pinned version on Windows + fail: + msg: "The Agent versions you pinned (6.14.0 or 6.14.1) have been blacklisted, please use 6.14.2 instead. See https://dtdg.co/win-614-fix." + when: datadog_agent_version == "6.14.0" or datadog_agent_version == "6.14.1" + +- name: set agent download filename to a specific version + set_fact: + dd_download_url: "{{ datadog_windows_versioned_url }}-{{ datadog_agent_windows_version }}.msi" diff --git a/ansible/01_old/roles/datadog.datadog/templates/checks.yaml.j2 b/ansible/01_old/roles/datadog.datadog/templates/checks.yaml.j2 new file mode 100644 index 0000000..d095222 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/checks.yaml.j2 @@ -0,0 +1 @@ +{{ datadog_checks[item] | to_nice_yaml(indent=2) }} diff --git a/ansible/01_old/roles/datadog.datadog/templates/com.datadoghq.agent.plist.j2 b/ansible/01_old/roles/datadog.datadog/templates/com.datadoghq.agent.plist.j2 new file mode 100644 index 0000000..9bfc447 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/com.datadoghq.agent.plist.j2 @@ -0,0 +1,33 @@ + + + + + KeepAlive + + SuccessfulExit + + + Label + com.datadoghq.agent + EnvironmentVariables + + DD_LOG_TO_CONSOLE + false + + ProgramArguments + + /opt/datadog-agent/bin/agent/agent + run + + StandardOutPath + /opt/datadog-agent/logs/launchd.log + StandardErrorPath + /opt/datadog-agent/logs/launchd.log + ExitTimeOut + 10 + UserName + {{ username }} + GroupName + {{ groupname }} + + diff --git a/ansible/01_old/roles/datadog.datadog/templates/datadog.conf.j2 b/ansible/01_old/roles/datadog.datadog/templates/datadog.conf.j2 new file mode 100644 index 0000000..9f4b595 --- /dev/null +++ 
b/ansible/01_old/roles/datadog.datadog/templates/datadog.conf.j2 @@ -0,0 +1,31 @@ +# {{ ansible_managed }} + +[Main] + +{% if datadog_config["dd_url"] is not defined -%} + dd_url: {{ datadog_url | default('https://app.datadoghq.com') }} +{% endif %} + +{% if datadog_config["api_key"] is not defined -%} + api_key: {{ datadog_api_key | default('youshouldsetthis') }} +{% endif %} + +{% if datadog_config["use_mount"] is not defined -%} + use_mount: {{ datadog_use_mount | default('no') }} +{% endif %} + +{# These variables are free-style, passed through a hash -#} +{% if datadog_config -%} +{% for key, value in datadog_config | dictsort -%} +{{ key }}: {{ value }} +{% endfor -%} +{% endif %} + +{% if datadog_config_ex is defined -%} +{% for section, keyvals in datadog_config_ex | dictsort %} +[{{ section }}] +{% for key, value in keyvals | dictsort -%} +{{ key }}: {{ value }} +{% endfor -%} +{% endfor %} +{% endif %} diff --git a/ansible/01_old/roles/datadog.datadog/templates/datadog.yaml.j2 b/ansible/01_old/roles/datadog.datadog/templates/datadog.yaml.j2 new file mode 100644 index 0000000..0328dcc --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/datadog.yaml.j2 @@ -0,0 +1,19 @@ +# {{ ansible_managed }} + +{% if datadog_site is defined + and datadog_config["site"] is not defined -%} +site: {{ datadog_site }} +{% endif %} + +{% if datadog_config["dd_url"] is not defined + and datadog_url is defined -%} +dd_url: {{ datadog_url }} +{% endif %} + +{% if datadog_config["api_key"] is not defined -%} +api_key: {{ datadog_api_key | default('youshouldsetthis') }} +{% endif %} + +{% if datadog_config | default({}, true) | length > 0 -%} +{{ datadog_config | to_nice_yaml }} +{% endif %} diff --git a/ansible/01_old/roles/datadog.datadog/templates/install_info.j2 b/ansible/01_old/roles/datadog.datadog/templates/install_info.j2 new file mode 100644 index 0000000..aee2830 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/install_info.j2 @@ -0,0 +1,5 
@@ +--- +install_method: + tool: ansible + tool_version: ansible-{{ ansible_version.full }} + installer_version: datadog_role-{{ role_version }} diff --git a/ansible/01_old/roles/datadog.datadog/templates/security-agent.yaml.j2 b/ansible/01_old/roles/datadog.datadog/templates/security-agent.yaml.j2 new file mode 100644 index 0000000..f4e8955 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/security-agent.yaml.j2 @@ -0,0 +1,12 @@ +# {{ ansible_managed }} + +{% if runtime_security_config is defined and runtime_security_config | default({}, true) | length > 0 -%} +runtime_security_config: +{# The "first" option in indent() is only supported by jinja 2.10+ + while the old equivalent option "indentfirst" is removed in jinja 3. + Using non-keyword argument in indent() to be backward compatible. +#} +{% filter indent(2, True) %} +{{ runtime_security_config | to_nice_yaml }} +{% endfilter %} +{% endif %} diff --git a/ansible/01_old/roles/datadog.datadog/templates/system-probe.yaml.j2 b/ansible/01_old/roles/datadog.datadog/templates/system-probe.yaml.j2 new file mode 100644 index 0000000..1233007 --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/system-probe.yaml.j2 @@ -0,0 +1,45 @@ +# {{ ansible_managed }} + +{% if system_probe_config is defined and system_probe_config | default({}, true) | length > 0 -%} +system_probe_config: +{# The "first" option in indent() is only supported by jinja 2.10+ + while the old equivalent option "indentfirst" is removed in jinja 3. + Using non-keyword argument in indent() to be backward compatible. +#} +{% filter indent(2, True) %} +{{ system_probe_config | to_nice_yaml }} +{% endfilter %} +{% endif %} + +{% if network_config is defined and network_config | default({}, true) | length > 0 -%} +network_config: +{# The "first" option in indent() is only supported by jinja 2.10+ + while the old equivalent option "indentfirst" is removed in jinja 3. 
+ Using non-keyword argument in indent() to be backward compatible. +#} +{% filter indent(2, True) %} +{{ network_config | to_nice_yaml }} +{% endfilter %} +{% endif %} + +{% if service_monitoring_config is defined and service_monitoring_config | default({}, true) | length > 0 -%} +service_monitoring_config: +{# The "first" option in indent() is only supported by jinja 2.10+ + while the old equivalent option "indentfirst" is removed in jinja 3. + Using non-keyword argument in indent() to be backward compatible. +#} +{% filter indent(2, True) %} +{{ service_monitoring_config | to_nice_yaml }} +{% endfilter %} +{% endif %} + +{% if runtime_security_config is defined and runtime_security_config | default({}, true) | length > 0 -%} +runtime_security_config: +{# The "first" option in indent() is only supported by jinja 2.10+ + while the old equivalent option "indentfirst" is removed in jinja 3. + Using non-keyword argument in indent() to be backward compatible. +#} +{% filter indent(2, True) %} +{{ runtime_security_config | to_nice_yaml }} +{% endfilter %} +{% endif %} diff --git a/ansible/01_old/roles/datadog.datadog/templates/zypper.repo.j2 b/ansible/01_old/roles/datadog.datadog/templates/zypper.repo.j2 new file mode 100644 index 0000000..c1f0b9f --- /dev/null +++ b/ansible/01_old/roles/datadog.datadog/templates/zypper.repo.j2 @@ -0,0 +1,27 @@ +{% if datadog_zypper_repo | length > 0 %} + {% set baseurl = datadog_zypper_repo %} +{% elif datadog_agent_major_version|int == 5 %} + {% set baseurl = datadog_agent5_zypper_repo %} +{% elif datadog_agent_major_version|int == 6 %} + {% set baseurl = datadog_agent6_zypper_repo %} +{% elif datadog_agent_major_version|int == 7 %} + {% set baseurl = datadog_agent7_zypper_repo %} +{% endif %} + +[datadog] +name=Datadog, Inc. 
+enabled=1 +autorefresh=1 +baseurl={{ baseurl }} + +type=rpm-md +gpgcheck={{ datadog_zypper_gpgcheck|int }} +repo_gpgcheck={{ do_zypper_repo_gpgcheck|int }} +{# zypper in SUSE < 15 will not parse (SUSE 11) or respect (SUSE 12 - 14) mutliple entries in gpgkey #} +{% if ansible_distribution_version|int < 15 %} +gpgkey={{ datadog_zypper_gpgkey_current }} +{% else %} +gpgkey={{ datadog_zypper_gpgkey_current }} + {{ datadog_zypper_gpgkey_20200908 }} + {{ datadog_zypper_gpgkey_e09422b3 }} +{% endif %} diff --git a/ansible/01_old/roles/datasaker/README.md b/ansible/01_old/roles/datasaker/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/datasaker/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/01_old/roles/datasaker/defaults/main.yml b/ansible/01_old/roles/datasaker/defaults/main.yml new file mode 100644 index 0000000..f8138d3 --- /dev/null +++ b/ansible/01_old/roles/datasaker/defaults/main.yml @@ -0,0 +1,34 @@ +--- +# defaults file for datasaker + +datagate_trace_url: 10.10.43.111 +datagate_trace_port: 31300 +datagate_trace_timeout: 5s + +datagate_manifest_url: 10.10.43.111 +datagate_manifest_port: 31301 +datagate_manifest_timeout: 5s + +datagate_metric_url: 10.10.43.111 +datagate_metric_port: 31302 +datagate_metric_timeout: 5s + +datagate_plan_url: 10.10.43.111 +datagate_plan_port: 31303 +datagate_plan_timeout: 5s + +datagate_loggate_url: 10.10.43.111 +datagate_loggate_port: 31304 +datagate_loggate_timeout: 5s + +datasaker_api_url: 10.10.43.111:31501 +datasaker_api_send_interval: 1m + +apt_trusted_d_keyring: "/etc/apt/trusted.gpg.d/datasaker-archive-keyring.gpg" +apt_usr_share_keyring: "/usr/share/keyrings/datasaker-archive-keyring.gpg" + +dsk_public_gpg_key: "https://dsk-agent-s3.s3.ap-northeast-2.amazonaws.com/dsk-agent-s3/public/public.gpg.key" + + +datasaker_agents: [] + diff --git a/ansible/01_old/roles/datasaker/handlers/main.yml b/ansible/01_old/roles/datasaker/handlers/main.yml new file mode 100644 index 0000000..abfddbd --- /dev/null +++ b/ansible/01_old/roles/datasaker/handlers/main.yml @@ -0,0 +1,34 @@ +--- +- name: Reload systemd configuration + service: + daemon_reload: True + +- name: Restart dsk-trace-agent service + service: + name: dsk-trace-agent + enabled: true + state: restarted + +- name: Restart 
dsk-node-agent service + service: + name: dsk-node-agent + enabled: true + state: restarted + +- name: Restart dsk-log-agent service + service: + name: dsk-log-agent + enabled: true + state: restarted + +- name: Restart dsk-postgres-agent service + service: + name: dsk-postgres-agent + enabled: true + state: restarted + +- name: Restart dsk-plan-postgres-agent service + service: + name: dsk-plan-postgres-agent + enabled: true + state: restarted \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/check-agent.yml b/ansible/01_old/roles/datasaker/tasks/check-agent.yml new file mode 100644 index 0000000..b576529 --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/check-agent.yml @@ -0,0 +1,20 @@ +--- +- name: Check dsk-node-agent + include_tasks: dsk-node-agent.yml + when: '"dsk-node-agent" in datasaker_agents' + +- name: Check dsk-trace-agent + include_tasks: dsk-trace-agent.yml + when: '"dsk-trace-agent" in datasaker_agents' + +- name: Check dsk-log-agent + include_tasks: dsk-log-agent.yml + when: '"dsk-log-agent" in datasaker_agents' + +- name: Check dsk-postgres-agent + include_tasks: dsk-postgres-agent.yml + when: '"dsk-postgres-agent" in datasaker_agents' + +- name: Check dsk-plan-postgres-agent + include_tasks: dsk-plan-postgres-agent.yml + when: '"dsk-plan-postgres-agent" in datasaker_agents' diff --git a/ansible/01_old/roles/datasaker/tasks/dsk-common.yml b/ansible/01_old/roles/datasaker/tasks/dsk-common.yml new file mode 100644 index 0000000..1ad74eb --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/dsk-common.yml @@ -0,0 +1,10 @@ +--- +- name: Make Datasaker Directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - /etc/datasaker \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/dsk-debian-pkg.yml b/ansible/01_old/roles/datasaker/tasks/dsk-debian-pkg.yml new file mode 100644 index 0000000..a37ad27 --- /dev/null +++ 
b/ansible/01_old/roles/datasaker/tasks/dsk-debian-pkg.yml @@ -0,0 +1,60 @@ +--- +- name: "Setting global-config" + template: + src: global-config.yml.j2 + dest: "/etc/datasaker/global-config.yml" + +- name: "Create temporary directory for key manipulation" + tempfile: + state: directory + suffix: keys + register: tempdir + +- name: "download keyring then add key to keyring" + get_url: + url: "{{ dsk_public_gpg_key }}" + dest: "{{ tempdir.path }}/datasaker.gpg.key" + force: yes + +- name: "Ensure downloaded file for binary keyring" + shell: "cat {{ tempdir.path }}/datasaker.gpg.key | sudo gpg --import --batch --no-default-keyring --keyring {{ apt_usr_share_keyring }}" + +- name: "copy keyring to trusted keyring" + copy: + src: "{{ apt_usr_share_keyring }}" + dest: "{{ apt_trusted_d_keyring }}" + mode: "0600" + remote_src: yes + +- name: "Remove temporary directory for key manipulation" + file: + path: "{{ tempdir.path }}" + state: absent + +- name: "Add datasaker repository" + apt_repository: + repo: "deb [signed-by={{ apt_usr_share_keyring }}] https://nexus.exem-oss.org/repository/debian-repos/ ubuntu main" + state: present + filename: datasaker.list + +- name: "Check datasaker Agent" + include_tasks: check-agent.yml + +- name: "Install datasaker agent" + apt: + name: "{{ item }}" + state: present + update_cache: yes + with_items: + - "{{ datasaker_agents }}" + notify: + - Restart {{ item }} service + +# - name: "Ensure datasaker agent is running" +# service: +# name: "{{ item }}" +# state: started +# enabled: yes +# with_items: +# - "{{ datasaker_agents }}" + diff --git a/ansible/01_old/roles/datasaker/tasks/dsk-log-agent.yml b/ansible/01_old/roles/datasaker/tasks/dsk-log-agent.yml new file mode 100644 index 0000000..5dfff23 --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/dsk-log-agent.yml @@ -0,0 +1,15 @@ +--- +- name: Make agent Directory + ansible.builtin.file: + path: "/etc/datasaker/{{ item }}" + state: directory + recurse: yes + owner: root + group: 
root + with_items: + - "dsk-log-agent" + +- name: "Setting dsk-log-agent config" + template: + src: log-agent-config.yml.j2 + dest: "/etc/datasaker/dsk-log-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/dsk-node-agent.yml b/ansible/01_old/roles/datasaker/tasks/dsk-node-agent.yml new file mode 100644 index 0000000..673baa9 --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/dsk-node-agent.yml @@ -0,0 +1,15 @@ +--- +- name: Make agent Directory + ansible.builtin.file: + path: "/etc/datasaker/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-node-agent" + +- name: "Setting dsk-node-agent config" + template: + src: node-agent-config.yml.j2 + dest: "/etc/datasaker/dsk-node-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/dsk-plan-postgres-agent.yml b/ansible/01_old/roles/datasaker/tasks/dsk-plan-postgres-agent.yml new file mode 100644 index 0000000..29029bd --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/dsk-plan-postgres-agent.yml @@ -0,0 +1,15 @@ +--- +- name: Make agent Directory + ansible.builtin.file: + path: "/etc/datasaker/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-plan-postgres-agent" + +- name: "Setting dsk-plan-postgres-agent config" + template: + src: plan-postgres-agent-config.yml.j2 + dest: "/etc/datasaker/dsk-plan-postgres-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/dsk-trace-agent.yml b/ansible/01_old/roles/datasaker/tasks/dsk-trace-agent.yml new file mode 100644 index 0000000..bb15f02 --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/dsk-trace-agent.yml @@ -0,0 +1,15 @@ +--- +- name: Make agent Directory + ansible.builtin.file: + path: "/etc/datasaker/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-trace-agent" + +- name: 
"Setting dsk-trace-agent config" + template: + src: trace-agent-config.yml.j2 + dest: "/etc/datasaker/dsk-trace-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/gather-facts.yml b/ansible/01_old/roles/datasaker/tasks/gather-facts.yml new file mode 100644 index 0000000..8e413e4 --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/gather-facts.yml @@ -0,0 +1,3 @@ +--- +- name: Gather Ansible Facts + ansible.builtin.setup: \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/main copy.yml b/ansible/01_old/roles/datasaker/tasks/main copy.yml new file mode 100644 index 0000000..f89f8c2 --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/main copy.yml @@ -0,0 +1,12 @@ +--- +- name: Include Gather Ansible Facts task on Ansible >= 2.10 + include_tasks: gather-facts.yml + when: ansible_version.major >= 2 and ansible_version.minor >= 10 + +- name: Include Datasaker Add Repository + include_tasks: dsk-common.yml + when: ansible_facts.os_family == "Debian" + +- name: Include Datasaker Host Agent Install + include_tasks: dsk-debian-pkg.yml + when: ansible_facts.os_family == "Debian" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/tasks/main.yml b/ansible/01_old/roles/datasaker/tasks/main.yml new file mode 100644 index 0000000..28f26dc --- /dev/null +++ b/ansible/01_old/roles/datasaker/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Include Gather Ansible Facts task on Ansible >= 2.10 + include_tasks: gather-facts.yml + when: ansible_version.major >= 2 and ansible_version.minor >= 10 + +- name: Include Datasaker Add Repository + include_tasks: dsk-common.yml + when: ansible_facts.os_family == "Debian" + +- name: Include Datasaker Host Agent Install + include_tasks: dsk-debian-pkg.yml + when: ansible_facts.os_family == "Debian" diff --git a/ansible/01_old/roles/datasaker/templates/global-config.yml.j2 b/ansible/01_old/roles/datasaker/templates/global-config.yml.j2 new file 
mode 100644 index 0000000..646b5b9 --- /dev/null +++ b/ansible/01_old/roles/datasaker/templates/global-config.yml.j2 @@ -0,0 +1,22 @@ +global: + api_key: {{ datasaker_api_key }} + gates: + trace_datagate: + url: {{ datagate_trace_url }}:{{ datagate_trace_port }} + remote_timeout: {{ datagate_trace_timeout }} + manifest_datagate: + url: {{ datagate_manifest_url }}:{{ datagate_manifest_port }} + remote_timeout: {{ datagate_manifest_timeout }} + metric_datagate: + url: {{ datagate_metric_url }}:{{ datagate_metric_port }} + remote_timeout: {{ datagate_metric_timeout }} + plan_datagate: + url: {{ datagate_plan_url }}:{{ datagate_plan_port }} + remote_timeout: {{ datagate_plan_timeout }} + loggate: + url: {{ datagate_loggate_url }}:{{ datagate_loggate_port }} + remote_timeout: {{ datagate_loggate_timeout }} + agent_manager: + url: {{ datasaker_api_url }} + base_url: /dsk-agentmanager-api/agent + send_interval: {{ datasaker_api_send_interval }} \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2 b/ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2 new file mode 100644 index 0000000..cd6d442 --- /dev/null +++ b/ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2 @@ -0,0 +1,35 @@ +agent: + metadata: + agent_name: "{{ log_agent_name | default('dsk-log-agent') }}" + cluster_id: "{{ log_agent_cluster_id | default('unknown') }}" + environment: "{{ log_agent_environment | default('etc') }}" + collect: + - paths: +{% if paths is defined and paths | length > 0 %} +{% for path in paths %} + - "{{ path | default('') }}" +{% endfor %} +{% else %} + - /var/log/*/*.log +{% endif %} +{% if exclude_paths is defined and exclude_paths | length > 0 %} + exclude_paths: +{% for exclude_path in exclude_paths %} + - "{{ exclude_path | default('') }}" +{% endfor %} +{% else %} + exclude_paths: [] +{% endif %} +{% if keywords is defined and keywords | length > 0 %} + keywords: +{% for keyword in keywords %} + - "{{ 
keyword | default('') }}" +{% endfor %} +{% else %} + keywords: [] +{% endif %} + tag: "{{ log_agent_tag | default('sample') }}" + service: + name: "{{ log_agent_service_name | default('test') }}" + category: "{{ log_agent_service_category | default('etc') }}" + type: "{{ log_agent_service_type | default('etc') }}" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2_bak b/ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2_bak new file mode 100644 index 0000000..cc55c92 --- /dev/null +++ b/ansible/01_old/roles/datasaker/templates/log-agent-config.yml.j2_bak @@ -0,0 +1,37 @@ +agent: + metadata: + agent_name: "{{ log_agent_name | default('dsk-log-agent') }}" + cluster_id: "{{ log_agent_cluster_id | default('unknown') }}" + environment: "{{ log_agent_environment | default('etc') }}" + collect: + - paths: + {% if paths is defined and paths | length > 0 %} + {% for path in paths %} + - "{{ path | default('') }}" + {% endfor %} + {% else %} + - /var/log/*/*.log + {% endif %} + + {% if exclude_paths is defined and exclude_paths | length > 0 %} + exclude_paths: + {% for exclude_path in exclude_paths %} + - "{{ exclude_path | default('') }}" + {% endfor %} + {% else %} + exclude_paths: [] + {% endif %} + + {% if keywords is defined and keywords | length > 0 %} + keywords: + {% for keyword in keywords %} + - "{{ keyword | default('') }}" + {% endfor %} + {% else %} + keywords: [] + {% endif %} + tag: "{{ log_agent_tag | default('sample') }}" + service: + name: "{{ log_agent_service_name | default('test') }}" + category: "{{ log_agent_service_category | default('etc') }}" + type: "{{ log_agent_service_type | default('etc') }}" \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/templates/node-agent-config.yml.j2 b/ansible/01_old/roles/datasaker/templates/node-agent-config.yml.j2 new file mode 100644 index 0000000..d00e55d --- /dev/null +++ 
b/ansible/01_old/roles/datasaker/templates/node-agent-config.yml.j2 @@ -0,0 +1,18 @@ +agent: + metadata: + # agent_name: my-dsk-node-agent + # cluster_id: my-cluster + option: + exporter_config: + command: "dsk-node-exporter" + port: 19110 + args: + - --collector.filesystem.ignored-mount-points="^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)" + - --collector.tcpstat + scrape_interval: 15s + scrape_timeout: 5s + scrape_configs: + - job_name: dsk-node-agent + url: localhost:19110 + filtering_configs: + rule: drop \ No newline at end of file diff --git a/ansible/01_old/roles/datasaker/templates/plan-postgres-agent-config.yml.j2 b/ansible/01_old/roles/datasaker/templates/plan-postgres-agent-config.yml.j2 new file mode 100644 index 0000000..76eb510 --- /dev/null +++ b/ansible/01_old/roles/datasaker/templates/plan-postgres-agent-config.yml.j2 @@ -0,0 +1,18 @@ +agent: + metadata: + agent_name: "{{ plan_postgres_agent_name | default('dsk-plan-postgres-agent') }}" + cluster_id: "{{ plan_postgres_agent_cluster_id | default('REPLACE_CLUSTER_ID') }}" + data_source_name: + user: "{{ plan_postgres_user_name | default('') }}" + password: "{{ plan_postgres_user_password | default('') }}" + address: "{{ plan_postgres_database_address | default('') }}" + port: "{{ plan_postgres_database_port | default('') }}" + DBName: "{{ plan_postgres_database_name | default('') }}" + explain: + scrape_interval: "{{ plan_postgres_scrape_interval | default('30s') }}" + scrape_timeout: "{{ plan_postgres_scrape_timeout | default('5s') }}" + slow_query_standard: "{{ plan_postgres_slow_query_standard | default('5s') }}" + executor_number: "{{ plan_postgres_executor_number | default('10') }}" + sender_number: "{{ plan_postgres_sender_number | default('10') }}" + activity_query_buffer: "{{ plan_postgres_activity_query_buffer | default('50') }}" + plan_sender_buffer: "{{ plan_postgres_plan_sender_buffer | default('50') }}" diff --git 
a/ansible/01_old/roles/datasaker/templates/trace-agent-config.yml.j2 b/ansible/01_old/roles/datasaker/templates/trace-agent-config.yml.j2 new file mode 100644 index 0000000..5b92245 --- /dev/null +++ b/ansible/01_old/roles/datasaker/templates/trace-agent-config.yml.j2 @@ -0,0 +1,5 @@ +agent: + metadata: + option: + agent_name: "{{ trace_agent_name | default('trace-agent') }}" + cluster_id: "{{ trace_agent_cluster_id | default('unknown_cluster') }}" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/README.md b/ansible/01_old/roles/dsk_bot.datasaker/README.md new file mode 100644 index 0000000..765d076 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/README.md @@ -0,0 +1,240 @@ +# Ansible Datasaker Role + +The Ansible Datasaker role installs and configures the Datasaker Agent and integrations. + +## Setup + +### Requirements + +- Requires Ansible v2.6+. +- Supports most Debian Linux distributions. +- Supports most RedHat Linux distributions. +- Supports Amazon Linux 2 distributions. + +### Installation + +Install the [Datasaker role] from Ansible Galaxy on your Ansible server: + +```shell +ansible-galaxy install dsk_bot.datasaker +``` + +To deploy the Datasaker Agent on hosts, add the Datasaker role and your API key to your playbook: + +*** When installing `dsk-log-agent`, `fluent-bit` is automatically installed. 
*** + +In this example: + +###### Host Agent Default Install Example +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_agents: ["dsk-node-agent","dsk-log-agent"] +``` +###### Docker Agent Default Install Example +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_docker_agents: ["dsk-docker-node-agent","dsk-docker-log-agent"] +``` + +#### Base Role variables + +| Variable | Description | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`datasaker_api_key`|Your Datasaker API key.| +|`datasaker_agents`|Set to Datasaker Host Agent.
`dsk-node-agent` `dsk-trace-agent` `dsk-log-agent` `dsk-postgres-agent` `dsk-plan-postgres-agent`
| `dsk-node-agent`| +|`datasaker_docker_agents`|Set to Datasaker Docker Agent.
`dsk-docker-node-agent` `dsk-docker-trace-agent` `dsk-docker-log-agent` `dsk-docker-postgres-agent`
| `dsk-docker-node-agent`| + + +#### Docker Agent Role variables +| Variable | Description | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`datasaker_docker_config_path`| Override the datasaker global config path.
| `~/.datasaker`| +|`datasaker_docker_global_config`| Override the datasaker global config file name.
| `~/.datasaker/config.yml`| +|`docker_default_path`| Override the docker containers path.
| `/var/lib/docker/containers/`| +|`datasaker_docker_path`| Override the datasaker docker agent containers path.
| `/var/datasaker`| +|`container_agent_restart_policy`| Override the restart policy for a `dsk-container-agent` container
| `always`| +|`node_agent_restart_policy`| Override the restart policy for a `dsk-node-agent` container
| `always`| +|`trace_agent_restart_policy`| Override the restart policy for a `dsk-trace-agent` container
| `always`| +|`log_agent_restart_policy`| Override the restart policy for a `dsk-log-agent` container
| `always`| +|`postgres_agent_restart_policy`| Override the restart policy for a `dsk-postgres-agent` container
| `always`| +|`plan_postgres_agent_restart_policy`| Override the restart policy for a `dsk-plan-postgres-agent` container
| `always`| +|`container_agent_log_level`| Override the `dsk-container-agent` log level
| `INFO`| +|`node_agent_log_level`| Override the `dsk-node-agent` log level
| `INFO`| +|`trace_agent_log_level`| Override the `dsk-trace-agent` log level
| `INFO`| +|`log_agent_log_level`| Override the `dsk-log-agent` log level
| `INFO`| +|`postgres_agent_log_level`| Override the `dsk-postgres-agent` log level
| `INFO`| +|`plan_postgres_agent_log_level`| Override the `dsk-plan-postgres-agent` log level
| `INFO`| + + + +#### Agents Setting Role variables +| Variable | Description | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`trace_sampling_rate`| Override The `dsk-trace-agent` sampling rate applied to the collector.
- When set to 100 or higher, all data is collected.
| `10`| +|`logs[*].service`|Defines the service name of the log collection target.|`default`| +|`logs[*].tag`|Sets the tag of the log collection target.|`None`| +|`logs[*].keyword`|Sets the keyword for log collection. Only logs that include the keyword are collected.|`None`| +|`logs[*].multiline.format`|Sets the multiline log format (e.g.: go, java, ruby, python).|`None`| +|`logs[*].multiline.pattern`|Sets the multiline log pattern. (e.g.: ^\d{4}-\d{2}-\d{2}).|`None`| +|`logs[*].masking[*].pattern`|Sets the log pattern to be masked. (e.g.: ^\d{4}-\d{2}-\d{2}) User-defined regular expression patterns are possible.|`None`| +|`logs[*].masking[*].replace`|Sets the string that the masking pattern will be replaced with. (e.g.: *****).|`None`| +|`logs[*].collect.type`|Sets the method of log collection (Choose one from file, driver).|`file`| +|`logs[*].collect.category`|Sets the service category (Choose one from app, database, syslog, etc).|`etc`| +|`logs[*].collect.address`|Sets the database host and port information (required if service category is database).|`None`| +|`logs[*].collect.file.paths`|Sets the paths for log collection. Example: /var/log/sample/*.log.|`['/var/log/*.log']`| +|`logs[*].collect.file.exclude_paths`|Sets the paths to be excluded from log collection.|`None`| +|`custom_log_volume`| Volume mount in Docker Log Agent. |`/var/lib/docker/containers`| +|`postgres_user_name`| Enter the Postgres user ID.
| `None` | +|`postgres_user_password`| Enter the Postgres user password.
| `None` | +|`postgres_database_address`| Enter the Postgres address.
| `None` | +|`postgres_database_port`| Enter the Postgres port.
| `None` | +|`plan_postgres_user_name`| Enter the Plan Postgres user ID.
| `None` | +|`plan_postgres_user_password`| Enter the Plan Postgres user password.
| `None` | +|`plan_postgres_database_address`| Enter the Plan Postgres address.
| `None` | +|`plan_postgres_database_port`| Enter the Plan Postgres port.
| `None` | +|`plan_postgres_database_name`| Enter the Plan Postgres database.
| `None` | +|`plan_postgres_scrape_interval`| Override the Plan Postgres scrape interval
| `30s` | +|`plan_postgres_scrape_timeout`| Override the Plan Postgres scrape timeout
| `5s` | +|`plan_postgres_slow_query_standard`| Override the Plan Postgres slow query standard
| `5s` | +|`plan_postgres_executor_number`| Override the Plan Postgres executor number
| `10` | +|`plan_postgres_sender_number`| Override the Plan Postgres sender number
| `10` | +|`plan_postgres_activity_query_buffer`| Override the Plan Postgres activity query buffer
| `50` | +|`plan_postgres_plan_sender_buffer`| Override the Plan Postgres plan sender buffer
| `50` | + + +In this example: + +###### Ansible Playbook Setting Example (Linux) +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_agents: + - "dsk-node-agent" + - "dsk-trace-agent" + - "dsk-log-agent" + - "dsk-postgres-agent" + postgres_user_name: sample + postgres_user_password: 1q2w3e4r + postgres_database_address: 0.0.0.0 + postgres_database_port: 5432 + plan_postgres_user_name: sample + plan_postgres_user_password: 1q2w3e4r + plan_postgres_database_address: 0.0.0.0 + plan_postgres_database_name: sample + plan_postgres_database_port: 5432 + logs: + - collect: + type: file + file: + paths: + - /var/log/*.log +``` + +###### Ansible Playbook Setting Example (Docker) +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_docker_agents: + - "dsk-docker-node-agent" + - "dsk-docker-trace-agent" + - "dsk-docker-log-agent" + - "dsk-docker-postgres-agent" + postgres_user_name: sample + postgres_user_password: 1q2w3e4r + postgres_database_address: 0.0.0.0 + postgres_database_port: 5432 + plan_postgres_user_name: sample + plan_postgres_user_password: 1q2w3e4r + plan_postgres_database_address: 0.0.0.0 + plan_postgres_database_name: sample + plan_postgres_database_port: 5432 + logs: + - collect: + type: file + file: + paths: + - /var/log/*.log + - /var/lib/docker/containers/*/*.log + custom_log_volume: + - /var/log/ + - /var/lib/docker/containers +``` + +## Uninstallation + +Datasaker Agent can be uninstalled. +For this, datasaker_clean should be set to True. + +| Variable | Description | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`uninstall`| Only removes the agents specified in `datasaker_agents` or `datasaker_docker_agents`.
| `False`| +|`datasaker_clean`| Removes the agents specified in `datasaker_agents` or `datasaker_docker_agents` along with any generated folders and configuration files.
| `False`| + +In this example: + +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_agents: [""] + uninstall: True + datasaker_clean: True +``` + diff --git a/ansible/01_old/roles/dsk_bot.datasaker/README_ko.md b/ansible/01_old/roles/dsk_bot.datasaker/README_ko.md new file mode 100644 index 0000000..54c3cf2 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/README_ko.md @@ -0,0 +1,241 @@ +# Ansible Datasaker Role + +Ansible을 이용하여 Datasaker Agent를 설치할 수 있습니다. + +## Requirements + +- Ansible v2.6+가 필요합니다. +- 대부분의 Debian Linux 배포판을 지원합니다. +- 대부분의 Redhat Linux 배포판을 지원합니다. +- Amazon Linux 2 배포판을 지원합니다. + + +## Installation + +Ansible Galaxy에서 Datasaker role을 설치합니다. + +```shell +ansible-galaxy install dsk_bot.datasaker +``` + +에이전트를 배포하기 위하여 Ansible playbook을 작성합니다. + + +****`dsk-log-agent` 설치 시 `fluent-bit` 이 자동으로 설치됩니다.*** + + +아래는 기본 설치에 대한 예시입니다. + +#### Host Agent Default Install Example +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_agents: ["dsk-node-agent","dsk-log-agent"] +``` +#### Docker Agent Default Install Example +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_docker_agents: ["dsk-docker-node-agent","dsk-docker-log-agent"] +``` + +### 필수 설정 + +| 변수명 | 설명 | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`datasaker_api_key`|API Key를 입력합니다.| +|`datasaker_agents`| 각 호스트에 설치하고자 하는 Host Agent 리스트입니다.
`dsk-node-agent` `dsk-trace-agent` `dsk-log-agent` `dsk-postgres-agent` `dsk-plan-postgres-agent`
| `dsk-node-agent`| +|`datasaker_docker_agents`| 각 호스트에 설치하고자 하는 Docker Container Agent 리스트입니다.
Docker Container Agents를 넣으면 Host Agent 설치는 자동으로 비활성화 됩니다.
`dsk-docker-node-agent` `dsk-docker-trace-agent` `dsk-docker-log-agent` `dsk-docker-postgres-agent`
| `dsk-docker-node-agent`| + + +### Docker Container Agent 설정 +| 변수명 | 설명 | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`datasaker_docker_config_path`| Datasaker Global Config 위치 설정.
| `~/.datasaker`| +|`datasaker_docker_global_config`| Datasaker Global Config 이름 설정.
| `~/.datasaker/config.yml`| +|`docker_default_path`| Datasaker Docker Log Agent에 마운트할 Docker Log 수집 위치 설정.
| `/var/lib/docker/containers/`| +|`datasaker_docker_path`| Datasaker Docker Agent Container 위치 설정.
| `/var/datasaker`| +|`container_agent_restart_policy`| `dsk-container-agent` Container Restart Policy 설정.
| `always`| +|`node_agent_restart_policy`| `dsk-node-agent` Container Restart Policy 설정.
| `always`| +|`trace_agent_restart_policy`| `dsk-trace-agent` Container Restart Policy 설정.
| `always`| +|`log_agent_restart_policy`| `dsk-log-agent` Container Restart Policy 설정.
| `always`| +|`postgres_agent_restart_policy`| `dsk-postgres-agent` Container Restart Policy 설정.
| `always`| +|`plan_postgres_agent_restart_policy`| `dsk-plan-postgres-agent` Container Restart Policy 설정.
| `always`| +|`container_agent_log_level`| `dsk-container-agent` Log Level 설정.
| `INFO`| +|`node_agent_log_level`| `dsk-node-agent` Log Level 설정.
| `INFO`| +|`trace_agent_log_level`| `dsk-trace-agent` Log Level 설정.
| `INFO`| +|`log_agent_log_level`| `dsk-log-agent` Log Level 설정.
| `INFO`| +|`postgres_agent_log_level`| `dsk-postgres-agent` Log Level 설정.
| `INFO`| +|`plan_postgres_agent_log_level`| `dsk-plan-postgres-agent` Log Level 설정.
| `INFO`| + + + +### Datasaker Agent 상세 설정 +- Host Agent 와 Docker Container Agent는 같은 설정값을 사용합니다. + +| 변수명 | 설명 | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`trace_sampling_rate`| `dsk-trace-agent` 에서 collector에 적용되는 샘플링 비율 설정.
- 100 이상일 때 모든 데이터가 수집.
| `10`| +|`logs[*].service`|로그 수집 대상의 서비스 이름 설정.|`default`| +|`logs[*].tag`|로그 수집 대상의 태그 설정.|`None`| +|`logs[*].keyword`|로그 수집 키워드 설정. 키워드가 포함된 로그만 수집.|`None`| +|`logs[*].multiline.format`|멀티라인 로그 포맷 설정 (예 : go, java, ruby, python).|`None`| +|`logs[*].multiline.pattern`|멀티라인 로그 패턴 설정. (예 : ^\d{4}-\d{2}-\d{2}).|`None`| +|`logs[*].masking[*].pattern`|마스킹할 로그 패턴 설정. (예 : ^\d{4}-\d{2}-\d{2}) 사용자 커스텀 정규식 패턴 사용 가능.|`None`| +|`logs[*].masking[*].replace`|마스킹 패턴이 대체될 문자열 설정. (예 : ******).|`None`| +|`logs[*].collect.type`|로그 수집 방법 설정 (`file`, `driver` 중 하나의 값을 작성).|`file`| +|`logs[*].collect.category`|서비스 분류 설정 (`app`, `database`, `syslog`, `etc` 중 하나의 값을 작성).|`etc`| +|`logs[*].collect.address`|데이터베이스 host 및 port 정보 설정 (서비스 분류가 database인 경우 설정).|`None`| +|`logs[*].collect.file.paths`|로그 수집 대상 경로 설정. 예 : /var/log/sample/*.log.|`['/var/log/*.log']`| +|`logs[*].collect.file.exclude_paths`|로그 수집 제외 대상 경로 설정.|`None`| +|`custom_log_volume`| Docker 사용 시 수집할 로그가 있는 경로 마운트.|`/var/lib/docker/containers/`| +|`postgres_user_name`| `dsk-postgres-agent`에 Postgres user ID 설정.
| `None` | +|`postgres_user_password`| `dsk-postgres-agent`에 Postgres user password 설정.
| `None` | +|`postgres_database_address`| `dsk-postgres-agent`에 Postgres address 설정.
| `None` | +|`postgres_database_port`| `dsk-postgres-agent`에 Postgres port 설정.
| `None` | +|`plan_postgres_user_name`| `dsk-plan-postgres-agent`에 Plan Postgres user ID 설정.
| `None` | +|`plan_postgres_user_password`| `dsk-plan-postgres-agent`에 Plan Postgres user password 설정.
| `None` | +|`plan_postgres_database_address`| `dsk-plan-postgres-agent`에 Plan Postgres address 설정.
| `None` | +|`plan_postgres_database_port`| `dsk-plan-postgres-agent`에 Plan Postgres port 설정.
| `None` | +|`plan_postgres_database_name`| `dsk-plan-postgres-agent`에 Plan Postgres database 설정.
| `None` | +|`plan_postgres_scrape_interval`| `dsk-plan-postgres-agent`에 Plan Postgres scrape interval 설정.
| `30s` | +|`plan_postgres_scrape_timeout`| `dsk-plan-postgres-agent`에 Plan Postgres scrape timeout 설정.
| `5s` | +|`plan_postgres_slow_query_standard`| `dsk-plan-postgres-agent`에 Plan Postgres slow query standard 설정.
| `5s` | +|`plan_postgres_executor_number`| `dsk-plan-postgres-agent`에 Plan Postgres executor number 설정.
| `10` | +|`plan_postgres_sender_number`| `dsk-plan-postgres-agent`에 Plan Postgres sender number 설정.
| `10` | +|`plan_postgres_activity_query_buffer`| `dsk-plan-postgres-agent`에 Plan Postgres activity query buffer 설정.
| `50` | +|`plan_postgres_plan_sender_buffer`| `dsk-plan-postgres-agent`에 Plan Postgres plan sender buffer 설정.
| `50` | + +#### Ansible Playbook 상세 설정 Example (Linux) +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_agents: + - "dsk-node-agent" + - "dsk-trace-agent" + - "dsk-log-agent" + - "dsk-postgres-agent" + postgres_user_name: sample + postgres_user_password: 1q2w3e4r + postgres_database_address: 0.0.0.0 + postgres_database_port: 5432 + plan_postgres_user_name: sample + plan_postgres_user_password: 1q2w3e4r + plan_postgres_database_address: 0.0.0.0 + plan_postgres_database_name: sample + plan_postgres_database_port: 5432 + logs: + - collect: + type: file + file: + paths: + - /var/log/*.log +``` + +#### Ansible Playbook 상세 설정 Example (Docker) +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_api_key: "" + datasaker_docker_agents: + - "dsk-docker-node-agent" + - "dsk-docker-trace-agent" + - "dsk-docker-log-agent" + - "dsk-docker-postgres-agent" + postgres_user_name: sample + postgres_user_password: 1q2w3e4r + postgres_database_address: 0.0.0.0 + postgres_database_port: 5432 + plan_postgres_user_name: sample + plan_postgres_user_password: 1q2w3e4r + plan_postgres_database_address: 0.0.0.0 + plan_postgres_database_name: sample + plan_postgres_database_port: 5432 + logs: + - collect: + type: file + file: + paths: + - /var/log/*.log + - /var/lib/docker/containers/*/*.log + custom_log_volume: + - /var/log/ + - /var/lib/docker/containers +``` + + +## Uninstallation + +Datasaker Agent를 제거 할 수 있습니다. +datasaker_clean은 uninstall이 `True`로 설정되어야 합니다. + +| 변수명 | 설명 | Default | +|--------------------------------------------|--------------------------------------------------|--------------------------------------------------| +|`uninstall`| `datasaker_agents` 또는 `datasaker_docker_agents` 에 작성된 Agent만 제거.
| `False`| +|`datasaker_clean`| `datasaker_agents` 또는 `datasaker_docker_agents` 에 작성된 Agent 와 생성 된 폴더 및 설정 파일까지 제거.
| `False`| + +#### Datasaker Agents Uninstall Example + +```yml +- hosts: servers + become: true + roles: + - role: dsk_bot.datasaker + vars: + datasaker_agents: [""] + uninstall: True + datasaker_clean: True +``` + diff --git a/ansible/01_old/roles/dsk_bot.datasaker/defaults/main.yml b/ansible/01_old/roles/dsk_bot.datasaker/defaults/main.yml new file mode 100644 index 0000000..806647b --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/defaults/main.yml @@ -0,0 +1,77 @@ +--- +# defaults file for datasaker +datasaker_host_path: /etc/datasaker +datasaker_host_log_path: /var/log/datasaker + +datagate_trace_url: gate.kr.datasaker.io +datagate_trace_port: 31300 +datagate_trace_timeout: 5s + +datagate_manifest_url: gate.kr.datasaker.io +datagate_manifest_port: 31301 +datagate_manifest_timeout: 5s + +datagate_metric_url: gate.kr.datasaker.io +datagate_metric_port: 31302 +datagate_metric_timeout: 5s + +datagate_plan_url: gate.kr.datasaker.io +datagate_plan_port: 31303 +datagate_plan_timeout: 5s + +datagate_loggate_url: gate.kr.datasaker.io +datagate_loggate_port: 31304 +datagate_loggate_timeout: 5s + +datasaker_api_url: api.kr.datasaker.io +datasaker_api_send_interval: 1m + +datasaker_yum_repo: "https://nexus.exem-oss.org/repository/" +datasaker_yum_enabled: yes +datasaker_yum_gpgcheck: no + +apt_sources_list: "/etc/apt/sources.list.d/datasaker.list" +apt_trusted_d_keyring: "/etc/apt/trusted.gpg.d/datasaker-archive-keyring.gpg" +apt_usr_share_keyring: "/usr/share/keyrings/datasaker-archive-keyring.gpg" + +dsk_public_gpg_key: "https://dsk-agent-s3.s3.ap-northeast-2.amazonaws.com/dsk-agent-s3/public/public.gpg.key" + +datasaker_agents: ['dsk-node-agent'] +datasaker_docker_agents: [] + +datasaker_docker_config_path: "~/.datasaker" +datasaker_docker_global_config: "{{ datasaker_docker_config_path }}/config.yml" +docker_default_path: "/var/lib/docker/containers/" + +datasaker_docker_path: "/var/datasaker" +datasaker_docker_user: "datasaker" +datasaker_docker_group: 
"datasaker" +datasaker_docker_user_uid: 202306 +datasaker_docker_user_gid: 202306 +VAR_CLUSTER_ID: "unknown_cluster" + +container_agent_image_tag: "latest" +node_agent_image_tag: "latest" +trace_agent_image_tag: "latest" +log_agent_image_tag: "latest" +postgres_agent_image_tag: "latest" +plan_postgres_agent_image_tag: "latest" + +container_agent_restart_policy: "always" +node_agent_restart_policy: "always" +trace_agent_restart_policy: "always" +log_agent_restart_policy: "always" +postgres_agent_restart_policy: "always" +plan_postgres_agent_restart_policy: "always" + +container_agent_log_level: "INFO" +node_agent_log_level: "INFO" +trace_agent_log_level: "INFO" +log_agent_log_level: "INFO" +postgres_agent_log_level: "INFO" +plan_postgres_agent_log_level: "INFO" + +log_agent_port: 21212 + +uninstall: False +datasaker_clean: False diff --git a/ansible/01_old/roles/dsk_bot.datasaker/files/libpq-13.5-1.el8.x86_64.rpm b/ansible/01_old/roles/dsk_bot.datasaker/files/libpq-13.5-1.el8.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..0a31fa75358971b4b8ce0cdc176c2f5e79d04200 GIT binary patch literal 202384 zcmeFYcU)AH9< z@RWwaWsuTP8IlWzQu!ZVR3N&4Zjo?zo@&4DAPB@c4csdMg&w$n4Nx|qW2Rp`@Cl;k z1$buQ=D@Fe5YU#2eVs%o=1FnvdKH@LMBuU{sXML$|3yc-SpC6Hkg;x>{~7~ zowg4nxgIOZb4}R35A|g!XQ%TZbhQuf`Lf%i61MmJ3~*;(&5rnZW34K_Mx6sm*O;tqxhYoAj|S|Z z@D=IT^44Y>r?NI4xL%aa?Sm9I)4u!GxXM$^*!o^B3bNZ%aqD_T@RR1oyz{+*>_XQC z!as*MpR_G+oq!~~QWbnsXYHq-8@!HMW*F=w9#I^$=~U#8bCfTOH9u+o$ok$%p1uPt zit(})XZ1*xyZ(AEALr_Bw4RlnyhuF0epPCf`;#!*oy3~)!MdjU^V=@vlJk*Gj4x(VTbNhLrEbon?-5y-$;v;GYXzaq&7Q6+DLgFA;A_NA36OnKj5e6rr zum}_mPbA{e7z7@QhT+IC2p)uk;c*xuj0A^cad0Anj6@Tmcq9&vM?;_x6pVxA}5BcIK5t{dJR)lD>@}(nw++`0QDXwmdEG|hyo;cDUQ>p39FDXh3wLg6? 
zNADA{!*x|S^JX1YQ;gWhn2o6jgtF^XaJ90v*?A8wo+v97UlaPL(J$s*Wh98SifD ze~}s19y&`eno(zLSau-gUgmk>aqq~1X$nE$_2wiyY!h{88f`f3Qef;j&lV$u+xc)W^OJv9A%h%?XXYl;1s2Dr)B77-YioK+onq+%SCJ7PW-bZ+pfO$NC2`%izGf=cON3tZ!`xv=wlLJ59EY=ir4%!Lwrf}x#At0v zNtwT|za#K>1pbb|-x2sb0)I!~?+E-Ifxjd0cLe^9z~2%0I|6@4;Q#j#_=&OZ?d^F0 zkpM7I5a&nhUt4 zj1_>&sU`M#%2+A9={|o5pg2B1KDCZK=--)OM_1?)4a?DKNJc$RnjyxK3Gof1b% zdo6$hZ8<38p!Bc3&lC4~9e@J%I2!hO%YFI$uX`%8Wp0#J&5>wP(5 zpSRhUQ~aj%XS*+t-{&cL0@&d!04NYkT0B63@p3-{D8(MdFM#J2+~-OAyvROJ-sg|* z^Dg_m_&)CgP++{gDgdSQ@4L^Z?el&B1^VZm|5ZhKmmXGbN2aZfCBdUDdVHaX8;QL!%wNFQV0D!-vLq1cR;_+cZ}V= zeeAtRW+wVzf{P=`%?Avqc;UPPK&Kr@lzR_Pum_IdjI$?!J-pog9El`vun!<2d3yue zjyM;vw*$_LMEtECnC$Ka#<>N6|6%#izGa}jy%)|E3@A9d`H;NGI08uq47PIj1z&J< zaRIxL05d-BfY93q2k<@)j^4j_4%GX4lPH!CxdZ)s{lWh4I{Q8U>W<vg&gF zUjAMvKtl`vyFZHTSBrn=6NvAne_3L3zXA2Z|6#d=1KLsgMI+ESA{>iA<53tW9!@C<2;Rn+h9R&d0-g-PLa+z|U;>6A!Ek{4LL55I8IHn{fvJNd&}2LTgM%VTXdDWMBSV2KK?8$?qc9jK z2{?$uK#>?28UewAfN;SGz&sFOWE_-8AmNBWk|5C-G7(C|l941l6plthF+?bgj3yv} z^E@OGi^1W63?RUu5MYFegF}dD3?4`eAbNm0kN`M52?{~u;edB&AQWUYjsS!o z223Ih55d6+WCSp9A_@u&0Yb!~CWW5Az^W791MpgA~7fw0)xc?ErG8Ml1u=;R=`0j z0t>i?#S$PSG64Z_z#Rca1fE4885!B%|M_}*$pUX76oT^U;VI+oE`yYT!hy%&AGv?| z0HQ28ek*@cxcn#X-)E@3+}(XZ|DXE!8OA^S-b2RQiy-6W0lepceZ&9y^|QBMQzt=r z*P;-Tf0#%KNeL7J@96WVI#X?9EjuXWzZb#&dlCG<6=7vizsvsdf|UmR^Zu0%DggOE zFTld<&>suM7GRq|$zIf-hPhv*rLUu=ZU>WrB7S%4ANs!uVKT`7N%}tt{}hRT>Ju*W zKXv+NnOA^^k2_@)P#Nf-$MGkLk+G(snVG&F6!`no)Y*G`<2)SgoW1Z!iWS(OZ2TAD zA1dr%b_P19|2TXXBF@9kOByDF_^*~cd>p)h$2Ua)`BN8v68txD_4ao8S0nsG^}ik- z@T~#nOhVn=)x(RjI3}KQbRikyTuI)NpnVyoM#IsbK{JgG7uSrG!%r8LCHXVQpzV%1_pcr zl~9BX90bA@wzQnD>uWiGvbM#p_MwWMoTtjU`8pNbfwwBQFH%)(d-7UrJ1kpl)8DsP zJB_MXYp1DLYpJ!I?ayv802cbrU_D`sEl%+ubb?2iA7? 
z!U2j74ix%d72$we|CS=fe`B1F7tpJryPvy{+mlCN1rMCt&-?#s7y6&51NrlxsZ+fE zx19mL(bDk$Oda;0ssCFcgaPUBAK9hkjWLjnE?`rfqqnoK2M}+sUw8i;ZwTe5g8#lU z;-@43rsZE#^KU9sCPo@;O2XrSZ6dJCqom>?Fi_#`?&3%CmII5yATT(vWdCO^_@A|) zSQH9a(Eqa*?5CCs&WA(Siz`%yr z+tJ<4(aj#{*@xipf26)2=KnfsAn5^vYQC->zenOHCID>3FMxk;tbaLWkMjXMkO{!K zx_~J=8RDNKj79)IwWC+3-TX6Wbi1BnUt0C0le znz_0A09gR^LD^h;1KaU`vAZU^6TD@i5Lp0!0Omt>zo|47B8`-B@NsnkdQerxCMy3E-j> z?CIz4fdPz)f-_UFsh{{T1t+Coqf9`dV4y5OpCS zNCJ_(L1ZtGqYual2Xb}?dHI68{XjlmAYW%tpu@iAuYQ2OE&~<+TtE!J-cmn*-~5N$ zpKbq{r-uFmWue6Bw{iVnkL?%4B<)B95nvz^utkHwumHFKY_*6u2prf1qM;-*05$-( zDBvEAK@$G3R{q27@4F$8x{0eUdxeS+ab^m`G<6#eASs)L4I|}mV2q1s} zGDnv2#<_a9kU+Bg`#srz-2EP?e_2O>{i7P#B>rml6OjZqj0BRKH_7{7R|0D_56?e~ z+zB`rk}U89&{wxJ&@?ks)zUQkR~kgpUugiH{)G*)*I#Kk`uvrK6Yj4xoZbIQ!^`)t zG`#)(O2g+b$Kvb!S2hA2?9>haTctlv>(Bg=1xXvhz|!_$X%9ya5?I=T^3apEAbEMY zdnp1h2v?ksv^R<1>*eSZ0MvU(No&}hGBQ1_bIQuj%*fnST~iTg{;U4yKETq_PSwmz z6VNa+Gy_YMN&Wc8!fc_imI5#(UyrbJMZC4Zm zEbZ#!>qb(v2W)sb5&-{j-T`g|2f!zHUvFt&w+oJLL}?!!-i6}LzVieZoVUYoLjZ;a z2nggq@F0SRqdUky5Cq%DmZg9B47@4;yjj}bjdGCjFD}aS@PF(2?LG9HP0BO&x4M6q zQhcEy0&h{uL;;50<~2n06^@&^VF6C1L@-E;Q_$cNqicOF*I7FSG!q6uSLFIruyT6lS1M0p$QtXn;Zs6gr?# zf_Rz7Y4NZvUq+|pf$~@9CFlaAQU1>f)Fc6T zX=mWU3#6I5$32{}lUf`{F5`wtFI~__{6ktRpVZgiP_U{Y+a}^^Km)-9?fxtqDouQY z*_GIY1_8P$)jOJPCfL~MnRmTGYvG9r?>`qQ3NP}FxqvGYsVJLPAa zCnfS{Lk?mDX?Bf_od{VGrB3{0j2F98R0$&)t%dp=~R|o-wZ5bkdWW5o3six!Ds{ zCK<1UcH7?cT=`@x%bqMGKl65Bb!gOJRl{aOA;vvSN8z)Xt-4HrhK66WiZ?FjY}?$r zwXchzQ*}_s_ir}QUQyHbfx+tn%=O{V8FJjecE00>p*w;lbOba?N@G@@x@qZN3ow)B zCIz0gb~zr))6pGk)!8KcBD2`BJUlts!a}v8)}`{~(o~oN-{-3*so#pvpL$5I|6}{L zmFhEx{M8d{&YR~RnzlnMV$W|s$A&>RsN&u??|$(%3t+`Dw>63IkW^69V%mIKu*02I zakV26M^rB9eiHKtc^sH6yd*F|rQN8UG53Y})_i!=4TI1hyuQzzHG;K;`{L$pINNM? zJUV6(%jyp9irF5T5pKtkb8+c0^II>y;GW+dB=NbkY~FKQ1=*__C{k}gf~#L&O!Wc{rD&PIo> z(R?E?*d?pcM@&+l=VaP;to*xcE5y+#9^$rNc(Wf*9X4@y2Pd+)vpi4Zyq1})`SKJ@ z+Sj0uCVk`RJBNDJmjOu=xk{cL0XFVK56?6RigBG|$SvBIwO~e)4*9If4%BsY?%BQ! 
z&SA{)zvh&~w%UYM=I+(i7GvW(eUpdXZA!s{lgIy7%TRscE32o)oN5LY}K}Cy>TQ zenMWgA&$e|Z(G{j7QhUuUj8D%$PaE!`mRrx+x=(W6^_rG7h#K9clsEQx#1T4L!pl0 zLTbaq&KM^ig=goDi`W_}3Rk_1&Rvu&(-fEgv22l5KbQ6116;xo6?{xmC~v$kz$Nm7 z68AuMZioBLVfH+Y{Ew0|5Ec%HB9PFjjuiuY6G-0nm0l=XRk0n8ju+wIaB1Cr3QGt8GutG~3vWJITz zeza-!o*{UysSbvIQXuD#<*~Y9l+(q9WN%uOcqYHur@WIBMg?HEH%vH#XzlVIsSiDe zm?GwG-*0#m(eq*}(dTO0G9tw&{$M5J@OA-1D(<^_Q?5@Z!}q1$hv70@u06As#HWW7 zK9@Ni?o#e{3|%n)r1C0e5(f5j!%`V-*MzD3B zTZ9d|FE-ptEtBAuN&kQdE*?`mun-Z_)YGHn&6@LG?Bf8Br|dVo3sucE*YQhhAe8uA zEVL`feU3ZSZz^_=L*`gX^f`a-h4BXQ`121C4=@ErSw%6T3GF#3JQ~hfU?iMg0sJt_LqkY_Esksi}ON(Y7Ny z@G+FzeG#kVdO$Gx?v(;PffByQC8{!h4((TuwJZ2?4N0b&q?mPy-_&(WZX96?1ZnYR ztGtuw+14_8bIq&TfQ_3k!WUevq37K|GjyLvJR`kd`m5MYu3qNH#Rbk^4V=o{PLzaX zDLb_!+ih_jv=|7}Ls(6iwLe;N$s=_!WutBm%$k)K+NX_v6mT-~Pw|`^7tLTEzkl#G zgD9rpV1!eUNAK;Yxq>8?v$F>8Cw@e*tAy@O>w@knnu{KH&8{mz{=Nj1L> zX4OmXC%nPYR2R1&2l^=svg`XSOmoCm&%CHkX@C9}9U!O`2N|!bKYAR3brqObyV7i2 za&X>Uv9Y-zX(Q|Tv8Y3~M5p+^szWept{yDhQzoyeu}St+SINThAUDVtC-#WNjms-OwOw)}eBSR?Z>&2k#1ojC>i(hKxF&D`B- zgS_$b*$Z;QLmtfnY*hCyuLNat<}jQ#%L3>Sixw; z>4`g~ipFM7kfGfvS6>gBploY%+^P$Xo>9{b?#~*NSRPFCG(PYaYgQ%vvFLbowN9jG zAD3E5+tugbj6GPVkdrPJd~OeRDCM^HmW=e^n&23-NxtS$)wZo$sjoS#Y+7^XNwp7e zYG(;N;g&h`=;0tvKOo1zw7rOLYeAb0IoRoSkFDiawX+V!L4JzY$s^86^6Y>?2_kC? z8Z;pv_c;n)*L<8OoLpeOa7eK|2!G_;ZxbdG$648(aGT(_k?jI%Sh zkL}f&tkUDn=i8EsGwus3qr)KL`sdZHuf(NiJU+7>l1Mc3v|+SdX$shqd=e8DwL)hi zTNP0>si7$t;LJHP>j8^yi;uJv&^?PuRyofyG9qJsAmrVVbUd{xwMgf^dy37!sNxfSal%kVup32SlI7WzbDqNQvvrRWgDp@*y zKHQ?#@_jd|*86?YM=^GrIgTV*@v7W!xs}-@f6-`F^@-XKuMDivrOuBy4u^^l-J~;3 zgsTU6^l9Xz(2iTxDrnGr%jBp%k`t$YaPxUk+ktb>Zu38`b1mRrE{7p5HFg!9bi5|# zeb@2$s``a~{$^q)SF?|*=Glcq(MN`lOB8dMhR&cM8AH))M>rJne&JNi17RKCxvvn8+n``(d%x=^ZBua-BQ9nc z2Dio^IhAyFLNcrA&Qn7;)$Q{mR!?X@<@bsg`h% zjW`?IytFZBk#x~K;svgG)a}vZRR%k`#>$$Ur;pW9N?T)NkrCb@N1*dpo*BOE4!L4@yiXf4lXdpn926Th%m3;k-7x>=|Xf zVavAkWpBrN1T2`7AW1i%;GjhY7hh-r@bgjOJ zq<^B})zYl$tGO|urHo)zvesUkEtZ6v#I&!Z*YoQYI9?qmb! 
z=#NYFoab~|AZKf{)%2cumqHS#-Ac)|+-_fS*GHgmHQ4Jq3&|s}Pqnq9IYFO;YnAw( z!p8}S>)K(Fo{44wr8Jm0Xy7LwGo;{qwg-G=vUJ8J{IfHG7rVcIuDQi>t7q_bh#M~Y z@rNrb9CX6Mkxpk140C>WXIt8h;h+}lHL9sPc1LaX%ue9Tg@okm7}cb&4w3Z5tJbn( z7a@#KnOr<3ZxTV0Zw6yhi#+Z}J?(_jL?B=F&4;mC!Q9`ACY+_o$as6)FYgq?ZRUqz z2~5;N&R7c_sNNf$m!Bs}YKw5()2nXH_ez$SS>&dg8Nawt^@AT~l46hPXPK}pU-#US ziY8gLEv>JuGhIf*ou)Q1c;CLg#Df8L1QHMms4+)>>5w z=>uA#-*XSXEes~wz8C?Yf4aHcwRv#PIkx@lc_bZ7zEK(!fkiN?iq2kG7UPSGxlChA zF3Qsk7bs&5->@4L>gV+DnywLdpt*FX7KgF9p_UDyt{Ht@p?{lERn`3#i`n-9*je+c z?;~FNNpkUTh)hv7p^OXmTl^P}ea_twvD1gwJ{EgF63x3A?;;I*toEZbYF#ckf%VEQ-?J>GVn5E6)m87EHJ} zFwa#nn`GLl%I+PVd1Bjpb6%uoc)%)$#%l+Y3;Wvp^!=n>6!J;zE1@qPOv1_5+Kx<< zJsQ>X4{jTXP;ukzsu|OsynnmicH&Y=ZRnaxrn%EK*{paGxBh2I7VTN1KMdQS^Z9x% zDPBpguILiC(bIiwPM6O=X7o9dvCTHoYMtsKP31-y-l^lm=S1-ww-%MnGi^mmTTCW* zjXx!|!aK{&t8;0YxiniuIqTBgje4Y=BVIOqn;#r0Je}Uwb-7l#{>tmSSyMHgS68m* z$ojH`OV@^o&pmZGUG)l$e*XI4<_9kgk#r3(-+)pIX8cQIueWcZg28QbSZ8YhDs5=9 zpRukx&WF-)7@e-fP7e<-Z<2~*_v+zISG!Mt zzjCCy#aca8KO{cv!A93(xm|sxh`q z7yg~^cDyeKNpK|^eSH41jZ5=+Va|#I=C0jh?8_gAI)gGBA8)vy7!E45^zc!UfAIbj z{&l9Kqjf$HB)wLse?h)WA{SfY!qq4}Z6CYlbY^Do=%n;}+WY5^d@k8ON58$R_tlY9 z`muGkxEy!r<()wd6E;U#A53knP^iM&=Cnmv!?}x2ml)4z>$?dS-bh<8Pih*Q&Aiv} zQnZ4NL;tH{(fVSaM5$fjQk<&83BspaGC8|F+uPdiHbx-qgF)x?d(Y&)j=d16l#6r> zi&Jzd%H&RxRE?2LI^o2Io^#fwW?J4&D_jyfeXQ{|205k68IoBozl%HZ#5H7AoYha1 zt2O#@+o`qGHNQTM3SDQmw@GJkN4Uf{*Wo4@CwGy?yPln%>$CnzKyJo=`k^ zDup>8>Sx>UQLv-=x$HryvD;LtdgG|ft0ZN$Es-5$Wa3Qs%DihBN9M;5fqB+emwUra zv9cO=Od%)0F1$)o^1TBgI{BqgZ*TrXl1!F^1I;g@9qHZjIx*I}%d4%t$Kms(Ul`U8 z>KEUZai9|O3O=1kz2)u}EPOO6N9BXo;nxj3*0oPY{S)6$e~%_YJtIm?6#3PXWp%qU z6l_i~xSLiAtk6T_1X}MOx;TcgXe#1EAo5CXidbezp){WtvpqElk&qWiiBMYq)cF2c zOm@j|Wo1DgHJw~)&_xYYc*02C1_|4a<+P&5+ zBh?w)YalT?{aH6uGB%y6Eq1hb&nr0B_?G2&AHQA<$6LB}+}ro{#EglG4-UZU+TmTl)ou)JFbpVJzY*y{0o$rF=76Z0dg_oUwaag&e8 zb=Df8@Z*k^*Pbzubu`K>k~ZaZpWe~KqXkK6(HHN1B_~NSl{c(P)X+(tAp3)MZ5V4J zYQ9N5dn$zb;c9leFy;k8s|UM!Vozin^`+PK(bprv#YiN7ZBCw@k*_;Ia+xhTK?a*OHfy5304{^>o`M*s zX5}5R(ztqk0QCp<9uOqV{GQ*2# 
zw;jyArMMzGmtMb?nu~vd&iEqPzX0(Kdf9Ow?Rl^l(Q1VbWHPqkJ=CtPR~BQ(#onLu z$+JnP(w-4xfco4`SkvpuE2ztug2JKud3tZiEqS>UqeVGg#6l=z%wgBLxL+N2e}eN`fa zQU40+d53XOeTn0N*}`T`rRS2*Z|>S(48Pk5892Fz)YPX=019g zohg^^&4JRk1bfyJzTUz`UC~^=Ne%PBZ-aafQ26GXhW%#_EEKD=-@82+X}02h`yul@ zJ)*YfxX+mvx1dP1Z{%E(uWR1n6w%JDue7&O2IcrqpU?9P_}O}~W~zxc$wr^b;piZX zjhT6b`dLSH@ZNrTmJ@9LVa%T^BC)IbV{HA8A@3IF>QiA|YIKTA$&&;@`AoEn1)t+5 zo61`&pBSl61$k{T1$ENWwTcQ({_qS->ex+?z6*K%))#ceEp0uyOlT==Kx*LB^Q@CK z*0Hpv>&qX6Bs;ey4++goZSTD3Z8~#aA=1~>o8x+3p{s@JgD`W^-Mz+`guF`*(JoXIApA;hW#*p zAmwoCC8$v3vD3v?H!Bej{e0u*ps~-yjpjCHM@uz|eBEP_i{%%bFB$E+J^RkW<7`AD z`f~TdYZX17qN4 z7bK2uojG~*jp9Qt>X+(ro-yUsG?2x0{o+X{NscTYIh7NS`5##s{5S@g_{h@tK=kr8 zZU(y$YWE{fIP5_z*uGXPk6|G|Q=TV(EcVp>(X$cj4;=MX-*q(kh2^t+|KdJ$ST*rv z%|Ia(l51Y$&Ep4S_T#wcDL3;?S(A8vfOJ@^@6=(Q><5)}9N8ITzDxR?v(*zLR@=u$ zv}{%AH5E!Vk5tla)<+YDAGLoSwe#svd#wZ$Phr?dFFhT+TgIiJXUQgPwlkE1Y31R`(`bYHpD)fR-zXGvL#AKbl7+kgYfU zWv<`qX^vSxwRT^nO)#-4ZjW}#o4EC9fj-j6P5$a>7^(cGqWI}g(WJ`<9SwS^QJcJl znJ*sRX*=Wl?K85ox}dDeqyM#nG9lD3e3Ew6k)x|!fOT7MJ(g&QI*eTK8D){n!|0;G5DHt0)5NxrTi|-^Tep-8{Igz3y1Y zN%R|$Z#7l%$Cr>`uCBm<@9&%L4G0&~8HQe^%WnAU`b<8@z=du7>zlWQhYW=dtGpve zK<+50@;RL_pt-wxUCYnTBcu9g&{KmW>`+9#Fkb!TI6aLQ&F~zz>wz{>)rG|}&PQd* zG~ZtH@VdO5Wp`dWBi*em9k8MUaT4?)E}PhV83b_*Hm0e3_lqKpT=rRu&^J7G#~g7{ zx9^+&n>%Z%@3|{BAO^RiXPQrjdS0A*s5Mm8-}f@<(HHR-^-{+@k+1C7d0W0|oe%40 ze`F0FNj|mPsEQL=xmydNN4z^XQq5_^G!jS}@4+SKSJ@`d)!%=>RHAg_@beurTPcoj z)F}i9whtqB{B!Ow_|$2TP$zMM)Jgtg8cui4)Z2{tD_YB#sZB`qC80xkaP8M-WJGQ6?8}HVNtNdQ zgF+5(n!xLAJ|og^>_<=a7CBU<4W8w@mzc43wu%q>Oq=x?gN`yCP0dNvceQMtiRI^) zWbY6DuzX^%_^jvsE%T$L!}gZ+r@O#Aq$k<&K796%b`*2w7A|h68K1hYLT=fJkvEPT zNOWe^cMXcYaQUm(cGEkTEmxPgXBMSI&9g5Bh9=}MDO^0ohvMbgB|WaAVu?J?` zEIUuPiAmy~dvCL{S+6g7^XreT&VCC z)fjdhV}E7SJubQY0^&is#Z&b>Z$IB^xHyvnevkD^7-H9cNzb0yB_79O z3izt8hpKn(R)6iWQ?zfBX2`eUJHJ^f;X~AmdB1jO)hm8&tLC;TE=mLsZ*2sb9Wa9K zNNHalzbb zxW8M6zco0?1ai`5o5`+l$jZhl6meCDB|qv8>3PK`ap>}v^jTZ3`7cus?!Mvl6Du)d zTtAXptdOFt4y}4*+`RYrsq~1|gXE_Uogc&rUm`Xt#a}&&MYe^@NqgyxT?vsLjgB8V 
zc%y&g;3L&52(KfludgK3=M{2r51Ltg?7Bd6A*oZ6Gp1babpN}MA}nmLDQ~>7|F+Fk z^uXQW%}iqY%sv>de_z6?)Uifuj|-RPQ1#^B3P{>uI6Nm|4X^Z?=X<4D%&i zH^tNBKaV{BpqZzb?NRm#V@tPqFKa37xk3H8_4!0WHv6HOE?iG=3g}Vf-d!d0>Bl0W zUoM}aJ)*U-)x~wb@axcl@2mZyqo0lMU~=E7T5N<%$0h~VeeW<*qNax7&$Jt z+WP7CN(48o3x1Jp?|Gtz7aLq@O}RxQu*HUWT!_zSwFv$sQ+#$!`+YmjlX%aWwKA&p z@S`eSFlzZLasgeK^Hjs$wfB6)yZ6qD=pCCDf3PHS<>@TPaA3F7i94+JPjiH%YU;i+ zblf?3E<9W6oJ~iqQ(aM2SyB-&T(-j;Z6gQ|{9+2unYs zQSaGQeE)gz2*PHgrg}x|htfy{lVQ&`VFZ$GI9pg9eL(qjz!ATvX@DBtf%cUDRFEjo>=sL-I-}V z%uDy9SRi#|_LRH*bX7cn3=jjqLcqd<*c#tQ=n(%$8$_1Ez3%ja_IE77zV62Mi?7f2 z`&E}SgsF)qhW62h9)&oIet&>43>&JkFJ<<+XI*zKzZn(6h6v)rO>t9j4uxooN#X?kil zHW^dxzmGU8=9WJio5j3mdgCx=DjPAOZ+c{m@$PC_Grmcyut`KG@qW1X^mB`>ZzZ^G zL#d8=wXu3LgVe&tY0%+xy2A^XvKEzm*F|KDZ#>Pg&q*-)x~}x94KvtY-LYb75;D(` zHUERx(xgoBKHs3>ItHw~0ZANx*q}IBY!leU5gcz?w>7p6QCYcgCderNK|pKGv-@kq zyrvZEsNe12x&;#G?Ar(wFu9ol6Ic{9CYP!u*wT00Gn={^ z?3K!|>6qJ@1~!;@FgkFK_Wl9Bn7e8jD-I7C{k2cZ9<$MX;~uZfJo@}w04dFIr<&JX z;K}Y);SsaU?jQxu>vt2k^IAC%zqNn9rdu9$=#@2R^3E}_Mba_P)snn#iwp=viz`c1GhZku(GZwrH>ZOMc}=CCA{tzEhT)Z+PVgU(6r1 z5aYbEhF@FS1|@?1Z{}tr5>iPa4!7X*!fctwc75r@+k|HWB4cWI=f?YNyfl3; zH?_GY8ZvPqy&RdElmu)!7Ov|gh*s$Z$VZOmTRDAr{;5{tf#>{mNvzfKu^zLTAIZ%r zr9$D-K1&+a%5Y6)ERF7EzoF>U<$~v}jd-e9X|G9|*>0>6T=cYb!wuPPEFZd8Zys>* z+Wh2JM~AKJyop@>=_fC`*Zp(P9q}2M;bsc^q&s!UK=K@a^)nOsk>|aR_dQtFS~pLd zFo)m29e8uDf%p1x-E_vl>@RF@MdPo$S}7MdPe_YpYp6H6j_Jl(r9ONyoaHmN_vIRl zz!Iu+7+P5Cjf!Xeu-fnUnso?Ts^40Omq;jkqUf~pIDg*xxr;)U8Xm0vMk0m#OSAtX zui!+aQTbiIgva9Rm)EXj>q@eBv+*>!T_@bwP=0g!>ZEt!iRT9|+*sPw6f7Qp^!&yt z!9!W2IdM*e7YAIen0EWMY7h;#*upbDdgQ2B5QM=#!og7sHv;0Qy>Rt|$5@4gUyf~n zQgtTpFHD|T=}nu{6Q5O$-??$;%Yxa*LC46usmoAr*in;RkIPSbojV_lZeDC_8jilR z_jdF!+GTH8e3TwlFK_>FLauqp==|KbPpwB^9Mh@4nbznz+=jrwQ4fEaQ!^5_m@oIiYx3aOe=nc~RcG{=@RuOA5VQA1#NjwB+se z-HSWRu*>ReB(BfvVpFAePJ_&5vE*3Qll%aC{o0OLr7{T&8_~C3-q8DYN=0*ZHz?t) zUa6*?1y)%x#33eG+JCxA5gvHa8OEN$)+%T0evi5H3F*>9ebSGtD>h7Tjwt*%Ahw!v zdbENxT^g@C)HmtY^aHiw$X5E}c2kxs4zxwnxf zpaT*+ev!x%^*I7Hyx(3yoXESKPZh9>&GSVVHn0x$?B{h=P~d 
zAF@<97z7+jqetje$$K+*+_#5Dl<%9dYMbb~ooPOpC;An2f!N)ng{3L^IDV}`^x^0W zg{_o0;m(W(vSr!LUQotfBbceW^;NMGE=3S06rPsJvhvWY_0{;P)CifbK)xFcurArR z`ScIr+b?;SavvSdQbqB`g~-%u#96Crz4(!sE)^1%WE`BFu_e%QEbjYIuff@5=03eQi0@+OS+QT#Cg_u|L;6B&1Tj9d zeaxSHdgo74A;?WvE6a9T)Nx zvYjS>-C~W-Z?qRb1Wz7`n>li`#Gr^!dq9x)S^dXP4kl^~rCSo8^TL&kW?5esqdJtQ zgG5y~6iw4!EtOov2i||i{rqmN%KIh6#6tHyhGx^O$I4UAk?hA*Cd4%cV|E+7(xaRT zc@=3UkGgKdWbQNLsL7_Cbrx*2-H%olygzndrllRbv(={U{5JcA*iInb=tBEsT--wR zH6b%gy|Y=JFCPlJC2kr7C?AbfNTlUwVfyHEvL&&u9g zbm(1e)>LTxr1GJ5BE*W}ENw`%7_u1OqEaV!xSA9b2mmJ1nnW1>; zM;)oHv2{H%FeESc0n4eZ#%eK1&NuwM*)h#TsfO1NTTadn>nb{FsT|eFGdS>}MA+=Z z;bdCLQol6)>6>rgof?>Y61)8o>*#ji`utYj=_9f)if%nwyagM0+aww81m?aa_atv9 zFu2JbBc8i_@22zDk0(?h?~J{K@8?wq+_=JA+PL$IbE)&5T)xO_Ti3e{zB7u^U(@hU zitZgXPnsKjx6{stQRM1z|9;$0%A99m=rr@;)a_c!Yj3-%&u1Lc9z7=zo(#=;ni^;K zanaKQtTYyW)8PYg_Hy9vE{pe<7gMe$!M z<6!eWdQhaEfzgq9ao51E<yR$WaZR>Ir)>PVZ95WEFUXLi4*$kl>y96R!7-?6?wc*u^Kf2 zSI*`W0A_qW@HWC1nr5$&Emtgs2*ApG^~;(L50@3Ll%%hiO~K3|lv z&C+0T?)1+?dL6p$fI3}9CT~99Ux#cEseDZ{x;@_2gM=lcsZa)9qpH^pfq=(fvFfZH zjm9N_BA&=qD6Z5P{_;RQQ7~^Gsy4a^4P(OP;vN$EI&;6~iI1U6YK_rO^#lL{xwE9U z6QK?f?Ui>$J_JLI2r%Azu#2(}l``!Roumx-MRXo%1f!> zLi`F21#eN~WME10^94xNI5|PE5h$gQE0(uM#waIA>?LeW7m$V3tQ!33uERjQF=@9f zF$pjKW8%0uCbS!45s!Vt!;P84L(&0*Icts`43h#SGA@SNAkA}r3%9fIUR_NYHaY%j z-V&s2L>WXTnC^FNrzi)PTLrKLi0Z~kbZgEG_E{maa}vXxU0N)5?j>^%lQb(TFl#Rni zcDr?vF_*CKT#K07`1>y9kkFq(jzQ1ZK|sYGh*q_Kq(H=01N4T>tdf47Xfn108#&|* z_&73QY!*eIA}IrxZPKiZLfqB}Gp#g2+EuxG^-1x9XlS9jj!;K2_n|cvIF%(ek$HcL zm_vx*8B+$Do|@$ceI5y6?or|Xmx^ro+EjZm%X7XrHfZk1Ui zeA|m#Ex(ccN~;bZCHb8!g|6ENtxU%@uT4?-{yUCJuZMN-5}73&9d|C z*uGNDAy=WIa7~_)#MWGKlsyowVGU}>EQ`F6yKR)>eg{n4R=elBlw850IQ|oGy+1+x z;9C5l!m|)R&%kzt8Slde6eAib_Vj_I_k_BZZWsbU79>uf$5s$x?^p3>ANt zS|XYrhWPC8sfr%kh0DWIK;8;7!10^RLrcKs-d7soAHnC=Fe@&x>Fu-ShNUoJqeX39 z!Y01v3Z=IciS}YKqfchux&&RM1M^6EkW3l<-Ou|AjInxBR@_voMa8y8rL+HyNKuj*lcC zZ-=JIkCYrkIhjNT}Ed=amJefODDW#lqm%8R| zK1#to05ZY)`IonCh@>~c?}c$nrn*SQAmX6R+vPie#UrD5r07c5nfC<;W`E1yVWe7@ 
zN@c~Mehw$Z=FJGBGtvi1jLV&fgKjVl`TJ1;X~CuKr1%M@wG_!zmM_Z!>e z?AKl;e8SkjUT%_MC#IAk~j~@XnSS!%T%7gAtJ6D-zm^Lm;p(=U6P$& zDvy^-+@B>nddoCmJx9@U6hlzF;eejhipipQiuFhS0p~|s9&rCAg&y4|FN`^)KO{qQ z-HE)!;oUTP*z}cX6RU|)4>bNurycuT&MDX}Ws1k|54-_DM!#JPAG(faZ+j80m_Kv4 zbTH}%$I!;Nc&|5(CPLh#{@L#ts9R$FUY?Sb4HxV{c?tm9JJ#IuV3)VfwU1=9H?q#H3+s+!O z$BcSF6$UCbt-y;Rq^?M|C?Xymf$#$u>2kr>V$&!z?P=7p`-#YLvrt~m>awu5qTmR{ zzZ0x@>a^Rtmm&^Ak2OqzW}{kW!XeCijvUW6f%K+tcH5d@7GH+dj!P&vBq* z@`imLKRN!^SAc!oFg!2@?F_x$IV}*ja=A6;c zoi&nP;ctB$`ypOZ17lXns9A0BD;rWPa91|9gdCRUE3FlA95tjJ6;|@ry{>kg(Lffa zt9tw4nvMNM+-Led|dM)%%<;O(&q}0zb*+rm2 zx$|S`o_A{GJZsyqwBTr^_($&MiK|@TvmBi@S#j+gm=hXwL58E(`zC2xatlw)jaicj zmly$ub^7$)5yV(Iap{?o%9>#1jI85oq7volx~VG>AC13T4Qja)Z-;8&H_U76z0Qp% zoxbuP9QO&4?m_t?Y~HAk+lqyvbPnR^Lj)k&{K9ipz}f z))B}F`(+X$Dku0)8=VUHR1EJ6B#{RI{k})vbRTjSuRHna-U2S6ZfRnz z`xbiUGZIK(a0@ZC{A@tM5V3_NB7o_EeC-jP=^jgofQu{wE^$>_M; zJ^yFP{7W)z4fl#s1=0I-kF(t*TTh{rA@hLYfUzY+w2dIU0GwV0Ha2ikP5C@?eb96s~oosBLwZ&XiBw!rYOz0*<#>jcFwK zn-&h|0z^%h^zlkeYQXXE6)Tq*mdbmC%gAcS01_o4Vx^1%L_*_L>T?kecgW59PZdQs6h2;)G zwshFFOWW<}NAZ!hwfT6x(u>U9X_L<}&HILzMrP^Z1KEfraysrd!)KcZGk=y2PWzBD z=n<#{t}>A`aqOAm{cm*0)drAr(I2mFq$%4dGRY?qesv6&guTwTDs|VpS#=Rgl!Xs9 zmd-FQrV{R}B72=0Z%@%iw%p$G(!~>wz>i;Bv#{&YX?c*CLFC*~LEG|*Zq!zY7iy%A zm>oPmJmNoFe2mAz_j_-oNW50Nd0&f-%OUQ`o!IC}gZTwR9zL@FX}Q`gZ(V6FeTHM+%}c1+-I*GtK7*T_^%nW$=uFb5|7C z^ULa;ap~SC$HWZR88F78lfTZ%!Rd8?N&4=BsFWrPl-KK?%E<}lpa;^YRI61qNLgGq z7=2(|ZtiT7mY`%;`|p@j)61$AmED9MV1jAN6bV%H{gjxmLT}ojRAQkKf9nh-uxsO; z0bqgpnFEs}WMzb}@P*Tdw!P0ykDu~80&OB+Ac)73@!RMs1;iQ4^M*;}_h{LIvMcw5 z!k(u3dj|7lWX21IIIW?LjT%YuF^c|6ga8h!nJEQS`=)0Ed5PV*L=+=&zCB`*a2gi1 z@pb_(0Wc9Wd@EF7U6_EnoS-AW-RfUoaP@T<2JOdnL!e{;)<(7bMWj)>;+B?K^qb=6 zD-e!`dNn9?-oFjGxk~{rw<1lBwNa_7#|#P(Lj0`p(DmjWX5E)b?d&7E2z5anCFv7( zC6tNA`=2_PWq|xCs1{*Pj1p+s(ZJN`%$RxO6~46gO=>6rp#-yn`|V|?fULE7LQadH zLVijH38`7Wc<2)6GKf)l0JEYlPv>TK$sLg%nU;`@t&(0p!`ARL7~6*$dJAT$Azcy7Q%@$ZI7!C z;5lFoXv!bdvzS-VC+J@mS9t9-YRt8?x^w5hXymXA1@lTRsPxJ}>jWjd%G(BoDG+e^ 
z@}zs@QgB~T7QUb?Lm*;y3b*tOgx)%8rI@7VVG$Ny?vcfLbjpL@qT=KtXs7@d67P~Z z#Mv2SY*$~V_yR!cyGS8uVV8LtyeR0Q6#qW=nf7l-nTRk<_)hfM`c`6+Ih z3Qm6B?6l4@S$WTIN%N_9C0E_`tYbA!(0Mn}nKFK3+GPi(`Yo%>_B5y$^58|lZr0JP ziQ?a!M&Fsw_B5OxC0`8;?9IdxJnLVbX#;^`tr--hga~-VJpU>C3iGK?u(+XmS)G!f zcXZep&G5sWr>AuBzc<$Q%NA2^ink=hapZLh&4GgQJ?AB`R|bpq1F_OsmzS`sLIDwo zhmHGw0y;9SI>2M=8^Fo~*Mrq>6$@+dRlR@)-d1sH=w_eDodCTwttppCt!_Zb=!!tk zt&j$qW0Q0CTEZ1?Mp!0#lP-5mGM21NVEy3dK{jm?#wWnAWG2Pw(Y?rH0y!7t)frZwA(0W+`Cw76$W?&L@uiEuYv)h zdYHy1`JZm(Vs&W6#OldrJ2}p6`i^aDh0{0TeFX0XV77^tTG}r88*xGAq})|0dL4YS z44DA=o4uhww2-6{YZ}>yBU}i7&0HL|dlp~Vzu&4z4f;wBATE0ofVWR9h$VW7B%|Ma zz6zyug$fP0udCYrfs*ay39coUUv61vy6^pnmCsA(3n_BuG`oOwi?!5Qh zF0;^i_Xbv$=c;&8M7Tjr>+CGH7^&8d-9sUv`=ZgfrB5d9Ra&S|LURL3Sp)Xlmbsdd zNG(^}Stdc946nps%-6O_!iuz8|GB^L&^-9dGLUZvZ$%rCI;&D}E zRGcp#+epUyK1tOa$|?0MbOF=);_86{;-m{SRIfSi+BE{Ug~gxFiN=Ou&@5%W=}kTm zgE#H`PO3M;3dbs=rXzn;GO22WMl?~%jik-3l73g8;~S479_qVf zG!+8)f>*teBaF+q@W&nrp^neg2nE58FjaJMkX+MmkMlqLX8FR6T4}AgH%eouvy+L4j4hy5GnP3^LCv264)d zAkZMBz>?n$Q6&_1OXV(sk{vffHYD?-c530%S(a|U~ICK&#-Re;$F8Yd61MRzr= z&zl;^{0DJh=Cx`|VZb=^ox46?prsdUg{UTf8RrW@emBH{(P`x02BJic6GBw5(x5iKmKxq^@n4Od_tKY#C z^0z6O2eRhbPgN^K?Mv%YzS@xV*?Ou%dYaVH`IE z%uciZO{2znvrCu)h#s@~NvWe>2(O@wd+5bnxxe+3_14H&dUpPJMT9ot6P1y7#>#CO za5JpQk#UG6n`kmbCqj~rQy#m=u#IN;H7UOw+l2(9%a)dM$7+``b^_<*l&5#)CY@}Up6=$IsV;5$enElnNSmus#Crh5ErX-YstMVaYaQ- z7BQuan?M1wouyBhdx}gvR)KPR%p>gV-N>Kb@k$l+1G5gQ37OvpG;WsIy|HFv{vbP= zEB8ZeUe|iUjrmY5QL|S5v#9z*%K1c39g7}!{j3OHHnYsY;Dy_j*+Q-$65G(8X%)+a zz}y~ZKS?NmQ;?)uq`pg05(qs+IIEQo4!kL9!@1+l!b2Y&fdF5<5dM%u?D7z`k-nrY zXar~lm`o79yyuH?0$sPB%8-)np)C&j7xg)`g93c0w|NEe{=ZU6^b=?IVhNNCGk4@- z^c#5UmaXDzan>lidn@@z9PgHW-Qs&mq0un7`dkOl_P@U;pIUg*f4P~2?HU|H?|IE- z)qbV71#S+yadqSAh`Gj!g0hVRm(DZWX??LbkMYmh;Kuf_vM~-DRfG6_S2*c#Cz{oy zm~=manv{>fQW^FrJkz`4^j)D@OzTG1dX;;I(tuD$zYcTGE;IBZC(V9`=J(vQ;ORYV zjs;naFI68@x1VQlJI;<-BXucT$^cXT_zFF-U-9 zSehYrJX6;aSQifJV|b&_=~OBgBpUCjd1_N(n?+E+<`#y4!_A27+$OE9sZtFHK~wT4}x=skN1RO28L57}^F5OudOOE9xy; 
zql*;n8B>c2TA_(a5}&=E_wv0UyN+9QTcb*(a(iHs7T*%f9L=pS7(^^Xy!1 ze+ECjY8ZG$z+~}kB@_&m9v*Td85?S08s+-#NBWOugD-Y-mZwfbrH_ZP$zzyH&oAu} zVgEg*4G4N)sm{zmSfCRTf8QupP_k?Tu!6Y=Z1PXIaXGmEYLfjHqnZTppUor1=?du= zL1sh|wT@535+i{gapONCCxC=5;ye@qf@ackCtzQz3nu99`vHLdu_WH>{H)Y%tKH)I zI{m1TxH}-S`ml-Mlif--;m=rC-W{|CV)I!T{lq;|*eo)3z8So_U-5~_YdgxEq>Wt+ zi7{Q&}1K$G$;WT|9-RlX5TWkGc_M&s>A7K%4!aSJ9^{Yx(Ug=r6kk6Qr#`VYqw32y*ng}8j(_ux;^#S?^u%?|nbrcNu@G z#_=%+0Ui@9lE6R`Si2)2{<49w4kPY-J@6Hf*@YMjbD*VJ>Gbhn>0Rh)t;)>5lx)-PsylR0!y&pSv+f4!86DC1s8 z-V_qgRc%bp&czASl(Rn;11PDmjqH_{t}bMLR{XawWdQ%LC|**{l|C}h)2-bPhuT;i zzrb69k4|KDnGTbblsfJ4XXHhMUI`bXpWFXo{BP?BktgXdx^X;n^qg^9~dREUb))Hq*<_)qv&5pijx@ zRWQGp*~E9ULjV}oL=h}3<6@*3+ajaCbVNLg?35aMnz}$KAI`*;b3b`z6&2+xaTAOp zB(H@61Wg(k|5QK`$ZW;!7HirCq^ygh6{|Rg%2b`m=tSW##q4T0{=?oON&AuTOKeCi zNqwD(kX2^-3<~l+GK$%=KWUPglf$(Zq*aRg5RO>=BT1!QGWavTh3all3h@8|jDZ@vtAomqJz5-sSoaLh zw@Fj{h+b$`?M_(xiVvYnk5f(-BHLzca?$N_xaL7!&Hmk1EMiO(aGeCEIsOg0e)4ne z9YOJum6($ph{5YOh^ui_I5m*rxqAJkXc2DWE^y{Kh`Z2H>XFdfGs+N%jo8ZhEnPcs zHUkBvtFc4FWQ6rE^nI6ytax0jCc#rWU2C5Xz9P0B$|Oe<8P2cq_ymq$Y;D#l}`$$PiHTq7k5}*Z8j(MjzirX{>)NMLA0!s@BS|_oS_m%vW zvNWZW=4^v!ZRr32UF&T|Ec&gc@9sEt12j4|<8(BMav>Gb5NWN+ysiIN{+xJ8+u+M! z$)2)E)oBVU8R+N`+?WgtckWf$d!+D&{q;@+fzO_WxaCrZr5%me#1@_7~b0g{A$(6OlDKpmlXr(|;`yG(JfN~&*QWRz)F5W^Ik{?zjK`&uBf;cCj zRhfkim3j?=;YSDIqdqc(4o%ND=y=nXc|fC;zNxu%{;{pFsySI9}-yWf$aLI;l6>=%Pn)gS5&88Yr_Df4nBm;gwX%b3{5!RvzS? 
zBDZ+R`>xjl{Ez9C0M4HYRm&Bcv+~A@|8FX&vsHx{WO3r!u0rKRy$m<0x^y zooXYnTz{R9u6(JEci~moB=y&FrSGm(Z7f!5K0f85uLd8kZhE(i);aNK=y9mrz>l^X zEsWyK(~h-dF$~8yRRe+WxStr0xmX%((|5cI9r5Y}zLS!cd?6JzGj#boSK_XdKExKb z0uEc)nrN6R;PIW(IZ=n)F zNkagcp){W(T465p1fi_o%^-6MJ^z1JMDQI0JdrEW(C%a!!p3?%`2km`l^-{}+4dqY z{!o7CS$4cvO^XLrQ`g1?Ks)mEKu}u(JVVbm^+foF%OcdhqaNrdA1?may5vJ7Ya|4k ztgX(Y+TvTN6r?zJ;%W6V7NlrlV_nDm>g>^T_NG3Lx2}B~NR$_Jr&jB|TKQ9oSH{N~ z4gTR=F73#1YG%fZQTlI1XZ<#oXUl~{2CR{JD^0#MuI8LBs$ovT7SOXV4ZzIOf=`W!C)vDCeY;JG+%8Ua zp+3Nyig8G);i4V)-_*xkKr<1x14D)<5W{OYe%lrc>$G(PJB{+Ec6)vyPqDO`xK6sH zJuQAr_tj%UK*1iABQ>8rY(x6`V+XE`06FHAYYPD^x)6Ou%;Mt6CRYFM_^qamR!z-N zGv*&8!rIcNm<`SWgf8yH8a?SCS`GM7x+J7WM2>-~GO-4FK<&gNYn|29v}=te&>E!7 zgM8k7+j+YVK9roiw0=O_*ErAvw1f`i(D!I6q&Wj3xh$f#Nl@DamzfuA0SxI(>{GF@ z_g*}fDHT2@t#0k^3%i0*y%s92DUlhoDV14WoIfW{o?VXY6cm7BD zRrEl2%ycin!nc%^G88`TmX{f-e-UO$=@%afdF5?SuI`8##7zx>%TH9z7qBbRD4QIM z7x%O3!R>)mP^mf7P)F3b&t*skuwEh^{=OaS$-pl<&M@D<&cD;LZ894-V~kZa37cg` zx84%kNq2pw@KC!h!6Mj|Z;ngkH&l2X)SyjC@`|zofG!sza-^8OPFx301^TzX0-iz3 zW7fpXFL0Vft$m8idV_m*WKwI!WY|K?P zeEY{e3$!5`{gZenRtJ^<{cS_Hv8G<{xwD}?F(JO($kjhj5GeZkpca8X7*w4(>y3%4 z;Y*gz6HVW2eR$pNx6jJ9itRTy=Mwcd7V;_|eeb5&tCFy>f~@2xftVy0MIzcLgy4vX zVf&F{^YH2%f^iOarnC|E`%M#WZC>vI>b049cagzBs5_*aZ{DuAqX;^hd zmXjE%CTi$%^tkjb0-2W$i!7-y2>Wj~vwAE-WprBZaDA6Y6kq0vCr;`Fm%(%v;ALQzzGhU0(8d^RfZA!@?JJM%SEgZt00Fa3JfW1(iJtWV8a z=s7@msNNM${88pIz_etTw{~0PRW{IAJ2`kKCa)ZZ@0bjm;rZ-eJY_l2TWKF<>GN04 z&rWHC7APBk+^U2U_L&kUW3PL|37Wp~+CI|yo|bakNmMMF%%C!ozYdS`&X8rn_R1-b zWj^O!I}bQw%2%s#*@EVs<7=J+b8+O9rl{-jCf|^!dN=Q&)O%OR;VafVI|z|dy#Z*B zsuDiZ$$r8rE)L1Eb9wR;dBe*mLEVZ!AQK!LZobYc5vbTTq zil7LHg{Dc!PAG-_JqMOzs{;1bi*V|+on0+_?XWcZ!BnM)ZYodwF02Z#*0VmH7;`;w zs`Du+^PMqf7shK@aWYD&B%8g70_!#sb-4Mg0U3?Azz#JO^zBag<=%%xW~;zA2sRV9 zQuO4zP9|tnILQrbWz-{yo73Os^>q>LdjMfOl{MN&JnX$1q%Y56U4zzfVEmPkbju8| z>ur%3pGj)6$aeUqt@NX<7sG)w3n+(n*;0G_FD|s*MbtWJ!HP~e(1K8g;muAND6iI0 zw`1J5i&DS-{wPFTSYx29WRy|y3W7F;Eu%`p216<{a7R2~AUA@>nMl$&5>5vP!Q6+Z 
z^InDnwN<+6*c0kdlSlhD)N1`zBFVIE4kxW^LFnuy%}aqj=KEke5;R2>I%7&I9kj$# z5<1h?AH>amMW(Pc!tih)oAaXM&X#K#fd{WJ9=<=^*Q3uaYqR8XkWh3&HB_j(Y2i(h zjqXbB=z_Hg2)y7OqW4NIv?rkcvkpW=Hgt&kQrsYe9#x!M4wVU!Sf$to6OWnSfc1$k z2(#1RZhZIVUI(s2K$RCG&jO9nKa#I?|xW^M^@`}g9hu3aB2*3f1 zl);2ik9TmK8Z~C0-dt*uSSIqO3_aBXh70qxj%C*4GOK-sz4D#%LO3YB)KA_y)ptf)N^lVp_7qFdh?9={oK_I(};Kd6~409@&Eo1akuPz1i~W z=pBQcsPK(p!BP~9CR7IPS54p@&pr5ci$*{BjkUEm89I1jCVa;}bD@hr8zycuKtY%z zmTdhTfHkR|)Uwyw*XGP09RjS~lH!7frY8-T+Y%$&&#k+xZZT88(~$m$;VTw@b@fZw zp%-p0flUWuc8vkGDooGIZcb^auLB@o#F%126zmdAn=PQ}o1H3ng!}Jlu0sTN8bK9M z`A2_2nm`X5^Ud}(584|-u6L+M5Eq>b0e0}jK?F=n1eDE*AHNptCtLeCsnU4cJBr`1 z_K@VPuk0D~?p$;ioq5c4aKODcKWA-4o{T0gp*`YQgM~1Xi57dwRtKRUpAi@K$|60t zN1)O~66gXSSfl~n2g^?mx7EH^n`P_Ls()HExd$VaKJR~ICM)wyvAp8TyHJmhd~0=4 zr~PztXz;9$QC<~E5(9iF>ow}`4%P&@wBBIEuGLS1Y)2|)VMjF24hFr;XMz|G-{x2v z#0X^@q=U;Ssr0#>Lo3!-0y#>nZhaywt!K%VBtMmON7L2x*rvC7jv$R>0bZjjntO3BaVDvGz zo3%=Prs-`g{jGLphQ47PoI7c(WXD5Crbap{P1j(z{J6w5**j^mIB;lKG^ekdo_^9P zGwxb3Iobs(?gb-%P%RkrSCy}7LiXr3a07nCgKsix4NZcuGP-l$Q#%gfXZ`gXiD|J2 z{1!ABizA(6%jkX!%VwPIAzK2DA>-tnSCMLWhX65v9VXRer@L}tLqnGSe?fhi!*Z?7 zL9yMPe&F-uV2w{b_0DaFvFOfqngiQ+NQABcJZ{p2!+y92YSw#tq@S+RGfV=B;I0*? 
z2=U9L%yXXkU9eoBnk-cFw{|O%fh3s?avMrBabFX!2LV%f%XvniE6*qUVOS$+)FWJf z4YQ(^Y`Mt|^{MWU*~)5JcWVm0(l$^60jBxK4K&-OJ``E1n0k3;Xf?X6C~&w{=Qtewxl zs_pQr5B}2F+TTJ@)~h8?V2nx zgHyz`1#OOgp`%(#ng3*r$4s6nV?gQ8;$fT)Fe>`BuQP>G zAKmyrxL!#9IxmghaHyjlNieNbjG3-O4_x$KVcPmy%DN!6Rj3}7izDQrtp zCnRLXMnHW}<=NHDl(_Xz=bEk6m~F2{%tC;|O$-zl=L$SEe;Uzn&GHxlbUR} zCJggX#&6cFE~KK;G-lvH#W2M4@}1|Zg!S`Phq0A|73QHoD+wSg3S;G$#Ck%6WzKWgS*P< z%vnsZp4;nIRRD3U>4H1%e~O@948qNL3fdm=E$Q~xc+O`)?2_#$Oc`8{zC9V@g}L)j zN~s|Me1f{vuRlhX;Z?8yK3S21URHcR{R%68!c$*lBTE$mtYmf{v$Rv^bdz8h&Yudb za>9(}i+m+k!>e73w-9#wHpTB?pw??jhPo)@+*9kRoO^Orp>Cy2WOjl$wR?okBOU+F3$_9XJ^Dg9~dA{MLV=<$7|I!1WnN zOM!loYx#d>q@`=Z0O7npl;P+sfvfQGEI)I`&M_s3qsg5IQN*ke4&>x73csW|Sxtz_ ziVj?m34U$(P}_;!6Mz%F>c~fc1~K2NkU_Eb70Fb$ym@#1swD<>M%bOFR=!j|&6!Lo zJ$Z^ur+~>KRh(a!Bd#FYy@gP+&b{U*$SA%8Zk1OS92LGfy5R2lTbT7Ens6} DMS zGTabVA`gk)6F>wonP3&o23v}#DR4$055A8@cIBAJ6esqRQh?xW}bVjq>y_17jfwDX`i#RtS2XUC+14@Wt0XdM3GeKIku5lxv2EhmPLwu1RnItCq* z$?~#aDe};vcD2D!xK{WvYFN2=qvxP%@FT0@0@2wLtwHDZSJRz0qyOe*>VjM<$?JAb zM#zj@!tk(*$HbLP6sO6%uJ^rzr*MqHDLycxoQKpX=3~0s0M~7>O{n%OHWSPj8jpjj zpjKoyzR8Nt+G>Yb_6bvUDsn7C^Fbd3O00p3fXNjRr^0vJdfa9qtbcRwJvr0-UaXxLR1PrDBpo;Ajz*wi(yMd;N2y08UZNkGFSU zoeohSV)J=0$`IuwwydFE4jO2xg5H5B=rOu2{Oa<}skthi*yZ~gSwf-6Tn(g1trryA zlddM(B%xxEX0t9v25AOz!}>fp?8Pf6^;GBfV8E;t;h_`$Vx z0)0r?6w2FmoZ?PWZnMnCi5fCNvoIqBbpxI=R3o^x^Ec2M)8>~YK)LTt{aRkuHV5>? 
z0pZn_mpyeL+JtGhbbSw})$8$`mn1S6@0PWfl~+j;?Ja-XUR2Hct={V#sgn6Aim-1c z>d6*|Emg}^<+O7qejYayY30*3xCLx7S=`KA7a#=2hzvd5WJ{7k%zFl(qWRd6@!tj& z)}&>*wn;YKmgTBcG%Vdn&Z&bMsYVX6&&^l00PsDXnl|-e>`WzqhZYo+%0&7!>le@~ zD|(FBUi#`Pgqiot_7Vn@Dq{q_|T2Yrz zhetXv)-)h8VCY`VYnL8{&;2{`1yV^9^N&?fezPfB1`%?)BwO#QYrbh+ve3UK^`*Gf zEA^O;N&m3qV3vN50+;1xQ4}C`fyw7=_vDD&rwN=`Xg-@`2&corwR^^fd?BJuBoQBy z%p+s5r&Fbt>`e#tu{Fk9uzk%PJwM&}FyPhG^B}wT0aej5-Kro}2=Ald3SqEGJQ5VQ z@svf6;nL-M85*AMNgZaQXYE7%DC676-b-0#R%%B-CP7#3r4sGGP@2O9nx(5i1mDfc ztA0?bycOv{)Yqje1*R`szum}8B~W25Y?%# z=qvO1>`jfER(@?e>^CUnxn}jhdLA?6@C=}9`$?9+rD-ic%BL@4FB*mg^n`qYLqUk* zEZ^h-v}D{i?NZ#Ve^?PcI~z}poW_%CAjs1QV(E~-N(WB&m$hUm!X0Nu^2*5yxhcvw zaMDGN)9^$llO{^dGtKU>#@|Khxlafd)wLPw`+Y@*mWZ#K22&OH&VYJ3b~FofaO5g_ z%B(bc8-xVynqQYj2#vY)jrbgVv6 z=7H4MtNG>RqTDZc-*hUQGN?P!{;kpKHXN|aRWLy&d9h~ONrWHK=@5a;DEp65r{V~` z6at1}rlQFQ*$iE0i8N!)=VNCC>y_xYq#408?B^<<)frIB(T^Pb(odFIUokHH5c%lt zvS{N|Vvi^UOtsK0RkItf%Pdt=Qu!EbzFa}>lP$~2?}ie;Mz(|X>rt1UMSk7J7|Txg7W*FftPRvYSik`NUv_3tcvU=$->JZv~<&J4thdf?}=)?{>=rNs_ z53=ad5|^-$8=Y*f^$ifWyKq=*dY^>*oJO*f{(nts-E1c0#=>^qHPnC6->-MwLyTa@ zg7aCQx;t=M&8E$~?*?y0!~^n~T?j20m*y4PnNF`*z>;;J_9x_dA6&_P4;HsjXe~RM zb4m7`$Xbk!gZwz%^Ycp6X;KhFE68d4;JZDfT*pdnPsC$s*&k~BCIE^MD_YkeF>YaC{P^;JFYEMOEurRX_A0U#53OwGJM7)1#h4w)lLzfcjPqWpy@{7K!J6YI*B%kDT zD_AZ=C7Fgh!D(1+8+Vy+&zLlZxPlJSE=@E`2nHMRO1Oy`u|yBn>*Yoox$I|#vT?gA zZ2Uw5lb;?r=-=h|giTN5iwyeZdwU%XxrOpl6be$n>0n)^1FJe1C4TZZalU`y4JrDIkB~*#vHKfJsw@k^tzOJIr=A4rD~-6 z0QbawAyvu&I6_o3tQ>JINlPBlUTzAL4W(pdSReHvg$psX@RA-sVsfcK{2}scQkjjz#rASJAWOP$-k8l5);B3%s>5Ut$luEI*nV%Ll^b9M#4a-#Zo|`6=AGn|Ac7=`O3(J(Wox;0% z6fo5v%wlZPzVR^X#?t-fIN2<)XmOGt!=8f3BqQY8es^IUiRn30-zywL#C&f3blgDS zuuvoar{=H@(W?Q=DJWD9Z+HH?~T^2ZF7`G@A8p=teW{jA$Cx%(XR<&=w(`T zF%+Ej#_d>gyA`#>#hdxA^oGuSYV@azeoyGkW&}UEUfU+=1}=4d18eQp*}%y{;wbL8 z$AtT|s)GeJ;5N1%2>%JTAZ+E{s}y#p`R^>3nBxTpaF&pXoL#RCe3YNSxl?JUC!Z_w zg7en&w;(Lm=Ra6Jhk6GHpRY})1AT-Ql_}uC|@yj?I`%h zQXVDezl!&q2lFNV?buvDAQvOmyq;(3{+Pafo+iwyXj`y7@?Tq{V>0h$RYFq{=62KD 
zzXbbxl1F1w0Z1mQsdf)D70w?EL@I=w#@W?j26FIWw;0P z9DG4zyVqPteMH4Dt6nLU&y#>HBW%?3FINX;pQ1?tzW#`x#>U_jYFVs_wN)>6dFNpb z`fQJCMoMT}FCb6m+*(!I?kCf3#pI$gh(UBHOgK$fhe^06aoXc`(J22KcSnWbF1T$G zyh@{$`*KgNt@;S#PWqN(ld+XR6&zWtKVJ`~N}Ztmp?awF&&%agOljj1!(&<*EpHZr zGbCnj_$GV-cBt3b=fLTO24=3=9e;Y>Gq9{^^=s79wIf}}E^*OU(u?UA!e);lC++&d zoW)BX9wo)I^RilOtzw@DaV{*lLxFb`PmO3aHzkofHXqb1Klpbybissms1Ke3=hy2E zGL$EIPOTTnSZ*uSo4Z>eX+NJbxkQ04>A4MvmSwvF^m;O^H|@Lf9%pr(q#HUkKtda^ zcaaOIvIg|(1f!(UyE0P}H~c~Dn=yKm(^ zeBSP9fkW@)32i1OUQr~EJErpC2`dseAyU&^@YQlU?opUt>!+yPCaI?Ug^=$-&VBoX z4X*x=OifTEIZF5SOf?N7Q?26kg=!V?Z6U^@Bst_!F;#LZq~67dQQBudT`#&?5Tn;7 zucLUJ6o?OLs_SrzwrP@Le5MtInGW>!&0<)kv1$48dYLPYDn1NMpuP2-PfNN8j3EDZ zf92$zs>N;&Y!$xN6#zj%zQ2xf#L-qe%zsDS(3IhySaN>B;YM(o!$GdE>9X9DR+q8s zf))QqZ)r27cZ4U6CW#6QeKmeYoy=_i*_0NUhNNT-5;bPTv~;+FMc?G4u%I#sp2HL| zbY&89$NKxldTp$z|Dde39X>V!a3RLv=tQ*1EZSmlARYELyxq8n5Yf=Iurc8N2RGYa zMfhf~ex#S?gK*|WK3#4UN%QhEsygPLSIA*<#NE zyBx{W>uD`eI5NC9$Aj7z|oJ3zu+n434xpL&7OhQ%8PolShEdXJXV z{LR{t1+!$xdV%k*OGG#4oK1ESLXDBZ8hJT#zlnOWWD*;8qQNLsd3`G5qkevp4*tz_ z{a1B_T~O5R#gxQ2W}}kW3XBlQY*88?0Bd4xF9AdWt6J9F^Kt5!D3OdI`ySCF#xL9^ zT~$|17Ts}W<~$#lJVYVl{8JEudlTYcHYzHL_H#-aS$9bH?PjGxKVgdAPG?Ld@OY#M z=}$O)v)xKa47Dhf~EqA5zD;~@adbsr6NM~rnMOzf>J(x_M7z={lT&&llsX6j5k%mG(pbU&`I zRcK#hg4K29p0?WcisuYsNZkmFQu;V_;4=`)cgxyHm1hq52g}S|tWi|4-4QaD zn3#Ee+f<3c&&@y!d-i$4W97Pf>@G*+0VT*Q?rr+EyNPe57-1M?3axWqK*Vx*7O#lO zEksprM*YK0Mh#V58vT~wRGCF9AL#5YqXL{fpwb)>B7j*A)T8Xg%gqBjUR!r9PGWKA|4i*h5qcCY4EfN9xcQ#4A=$jN{TQ@-%8<-hKKsde98iH2H? 
zKz$*(DzrsX2Df1!graGAp%4f2xdAGh;@9rSw%dhUfn-RV%>@Cs^YafGxe2K=D+Yjo zzFq7I^J0gRb|D%)Q^RHhwwI7d zrCvQD+xTF5mFMZ=>II*rJ7X$6oIHoh%*Fa*5sU{cxSEI+sFO!RFHm?N+Kq(G)`s!c z+`V3MGh32Gf+)L9?%`D*;~%(q*+GqHddJY0(qp=cLu^ghtms1C6UW0HsXg&rs$c9t!gl%+i&wd>>^()eKTc}@46V(R`{UPg{e zP~^_V`B{D7;&jf{&pAXA0Pp!N%?soUaQP6X33j`e*iKE<{fedOzz)^ja-##pxGPpd zuqU~t*qnt1;|$^T$O(p9wgwo^c3gj;vlt@R39iw zFxTtZI@RMCt6Z6$IoKtL8S7dfkS+m^t%aD|qt}IcrY%D=5X=m<{yq{BxaC3RLTOifp@=D_Yvo0 zj<|I8D^=J4A;WG^Il5}tZG>VGr#Xi^)2p4_-!3p*Ezr^xOG~H9-2ZNUYOanX z)|BuG*|9~tajrAu93yq96KZ9NT7B76+~1*5L=Lu;KfnVgHOV6CQ_$b&!0;(vldGy> zJtHExGf<6m+aY|{lhRSdy;?%H2MtwOLJpgeaYoL#KmBT2Pri%|OZCozAdVn*#cb4K zs)Y0X!HP2y&K;9d>bQQ&XbUxV9f^S*Mh73xh~uhi7e?=2e(yO0)rbFu7BDSOT8n*S zksrm$bB(|R3Ya3NQRh2Z7sA+DG2R;LD5FU5#E=|@>5Qyg@bj3%D z);C^K<$KzJyUH_8gurE3HX4`)yD0iV(lKU8AWH zA1v@qiR(b!!&nvE)?yimFpr&WDGD=hnnB52^J|%3w`vaX+p2FFVR|p$${|U1;P#j6 z=JJmbkqH2ZBD<#;;51JBU{esj`up9A zyEx}K0vH463E~a37jD7Hkf^BAyElFgl#?-T5VucER}&SEl&=M;EU9iQTF)bCwixta zQTWK6-tF_2@%Ey(4mT>Bn}{_XEs7;vZ#?8s>k1j3GAQU|;*olDNj~EjlI~G{r;~A54jPKrcp5ObSasY1z3 zhZmM1?6Yo)qZ@r&IabE+_cdA7W6C31EXAH702ai>ZFoD8*@k%frV1-4Oo>+j>e*S3 zEsW!`!+Am~&`0pM-=!mezc7yC&`lDHZ+tsnZ4myLb%IYk4aZ|pF63r`AM(TpD%l6} zY?1?qpg&;d=Kh^ELxR=8 zoM!nZ6TTsIoUd2zO)@i3L&T4P(-qxPnfx`MIi0g5)bD|Z=BQMk_{P3C} zu2&lqi*n@e2d!9RqFSvx_5kZGl)z>wU{_v{O>u*(MQ830l-Sf9vpsbv#YqaUx)j)M zuO!&OSVl7pCTz!i2chPC3la41nAFJDaC4GU1FZ5FtEjVxe>Fb*?%=~ahHq2*>%gHq zPL;GOXpHpZl){)_(yWh@glm>qlW`c8viO^QRA28QcJI<48G>O#nf-?@D&pZrYH97O zO35dB5!qxQ&BzhGtexM{u_qFFF-NnZaxOZteZdP07L7t|gJZ#g9i&AAWc;R2p#%fy zp3ETKM0=dTmI_sh9r^%D#>j$@vg2QR0DgYmxT%q;bB*EJ2yF%8gH6Da-l4LF8NfC4 zz*N&?UtXk12FDDl$8N;%dUZ4-Hb1pvay7O5`+p-K5P5JEF*C&U0@mcHt};R5tCuaX zRWG(t(7JL`!l%~36iYSYwi0!Txfz9|4$n87boc{-l2uZ}hq3Yw$T!_^rbtn6B4KO| z3g4v2HS1wd!c&m0Kq~?Fvio1)%dQIAg@YU5*J1cKEKtLteWs%qFX<)vR1AP6_}hI` z&Nr)~A<7nAT9C0G9UP)B0~5bs!XY=;%L)6)nd3UuEF+~{Pxjx^5m_7Da;5z>P}+uH zjAhW*MLiIeRLM@CI5i>T!L)1hnc0Of#3yOpx 
z$4Fas#q8zdF7ygYWc75!uHi0%A=JSert*&LMGU+_|I`8{I=O};S8!HzvvqAJvZ@+U6bVIq;i0=P-A==GI~)k*t8^R33w5AQ<$XQav!QCl0FpzmGH|cU zm&SiLR1wq!KGvxFap<@9kB*n|J9r>KWk!ovspXL$^$Y+nykTw+4mAn(Dcis+!r-}t zRzAf#TSQIy|Lnm|g^|^fEB1n|>CnR%PFGJfkLG$vMGQqQ#e8^Z3ESN6FPfio#V~^# z-wu7ww0rxu2O+li(_sHq^LHZJd?{=CcTDbGI*&KE*glNdD_aWyFVN|P!P2f<{JAe3 z!!Wn=O*|ZyF{$~J5k^B2rZM$VeTClP5a0xL`KBz-wND0}FeM@UNYg^of)t?9`v#t# z$f~JB27LgySOH2|N$qkcHQJtH6*Lqpss)AN446p_J1vDLo?XAbggo?9_> zGIF^8JB{mg?VDSCmY|43kW?C{bO*T4SP-oSap$k_+4-iwt0*Xe?F8lcvasxg9fYw|K(!=%LvI z9{PNMU+>Fq(Z{?MnA34ULP+809_y2|N~9|U71jNFF0}`A9wkt+68rB9#axL=XMKfP zxwfNbM3~*iXmU~&7qF2!cs$I0?PN5(r{bv^QAZ9|6g#bC^CU|xsb)Pn!iM-?s4Gj9 z_g2M`3zeDcyJWJ$mzantaUiUbeN^d+QgJYSL{{W_Q$ntr9jEfRS7O?nmPPb+FQEHf z1nV7^u+5Z>|&!k$cv(I*jsakBK07w@0;DM(Oy>p@-+k-KqFTfQb?3*sVE7UE1 z*%&C6xGoi-a%vxS3Q!Ha`>+8fyfPmpU|%c^-ex zrwOh7?Jw~^$OAx}y)XTcK#o~Qyv`)pGov6#Y0J-~R}!zs(akM5Ww_xtu(HpWedH zo*)QMf$|??+U7lob(vItv+Piz9U&EeOh5NUdO$YL$i6EY_2kyR*+Pe$ocB5oHJ5Cq zbr9U5A&ZrOK!2incQz!aGxB_E+X-Boo1#c0QHdN#`#D~QFQ#)N7KYZq!fqo8IxP7cd7(Ex#ZR*RvRu&Pq&#pKIiCa@JiN{a+nou3UjQP}kX zL~q@F;sNZrgW?Wo-sV5*oSIyY1`Z?X!Xy}TUskzQEDtNX<$f4s$w^bdjuYqYSvn1O8(nLplVIXQk?NV&Qhfxh7G8Y zX=v0h(d6kh5bmvi8=&zgsHJ-oNH}8LG~=UY85z{5%#z<<6-qUO8I~SkC=g{>vJm;1 z9YmaiX^g=SwHYF=MD&5AdhwGfQ?m8Re$c-mQg>Iuqi%aRgAOEtQtmU znxst!)HO$Adve?OpZncz6M{jUN2lN%ZK<%^*~k{&NVq~1*3wA^CoF%C2^e3}9`|=z zFw+#IlLdP@2Dr2eylBkLP~haue?qA|Y+q-?I1qhar+bHo_i*SP#|Rcpj*n=gu#;(= z7zY?;^bDW)=0!=QA7V6}ZZNED%7QW=GYW=FRM^A7KZS=tH zMt_XT;>IsF>bhh#{yLlP4m}PWqV}8`qrF^e7v)JeeW18p(7-?H1Lv1Cpr3oup9mZw& z060f~;D5ae%;5zVUwlCsGF`faa^wkU0vylTLZ_0d>R3^_Avr@Jh*~wBP`;+6L%YB~ zBll>i6X&kXJ{2zu&Dtjyg$YNVt2QJa#8+B3QCZUfZ}D7SN2!(+QP-Xe?h{SWi9szw zT{x|yz@vMshae*~Mm4CVwU(fK#o}B4)v^DzEoPte?2pho_5l|e+hDs|F2HTSwp@ZA z6e$X>hi7v!QYD*=4gTt)Vcb<>-khnzMiCJ_)WEOV^_IBMMjlB8F7;+wn{f@heox} z+Xd7MM|rKbQ@QzzRc>qFaqHklJP-ugZo+(O{lr0Lbz66uO#6bo*~bG>A^3w$!qHM% z0>*YHG;9-v5ivu%oar)RGneG(0JYE?gqunmpai2CJpYhiqJy<~aP_tWo|&9)?uI{d zM~l7{T{)a0c+)bk6*>b|fsnSOIXv%w5xd~hIg5uiB$yrce6d>TOv=HGPy)5I2mLb& 
zw1;C541H7uD9&9QAZjC+;;5~o-aT0}i(&A}?lNpEur{gD{{^gAE+3c_GRo=;#>9^7layVBU4;zSrh}{Ae84Yf3$$<*gL!6LtM}(u(~3yp z0305{e|o0w)~j)?>Kz_{nm-MT|9O=t>PvsWUn-=5@j+jt5_tP7tMh_&X9b}VB`GP& zHQwqby)h-Q)R zsUMNXuIJ9kXwb`vpdi_GZ^#=9b`16`W#kTV8~CC*b9LSZ-wEYu#!J#E5Y;Zhde%10 z(=J9HjFBL4EM@cC;v1lX&yJ1v|IJGNqb{kc{D&d0@mBX(+nLwgz-zKY95{3n0?Qe% zd2!KEKv2HR8}~J~&(8r3U;&5>KQ69gefi({oI1iFBZ_s*HI~*l8NWb$b(V}#Li7;RQIUEXwv*rmQy3!9 zP)aQ)&EO~@%H3m=)x&>42w&)2ZHPf|d!wUf_aoLc^xlk$_!^lw^S%#}s@VH6Fvo7% zdcP5r_9mZ#Ly}Gzvh7hx7Z4B^YhYLWd8RFiY#FvAD2gQ!_JK0pS1&yFfRsP35hYHh zhVV0TG%_*{Ey%c5$H6`$tK!`7sUDvnPUYbwG{IjEAe7XP7t~lMAJsE}wO$v7=-pbH zZMo=#dTb6p>cq)u@b$U1pke-wT~#rmDr-Yj15?Bfa4R_ZA*<|~y;h8Ew35OJHldt? zSg(ktOV`t2fOkAwOx<2$;)Movy#6)+yLnqF24h*cj%JoTv`;TR^mG0ykV+f{GD`Ff zkqU5*ir+s%Co^U^7^V}w$i;GxhRdd0q6RKyl0XNM24zhIx3BKx{68JQI1eTQQsOX! z&~x{_6w#mi>{-Crf+eJ>?bvJ#P&^rx^d&aCDp5Eu*1LUKwwf(J=Zdc$o z8R)Zup3O6bx2w)b%~36(+DdGU)_Mv**|*(E9I_*crp;yLf0yajlO_EYSfNy_z(4qs zE$(c5;o=LwW%`k@Kk@;0b#bUsGS>tUyd>}eRREoq4zGsVm>(BC9TcvfS&+tef|Jo) zz8e*q+a-E;h(6DJ0K$A-SwMFhSn9*3M9v5B% z6|(4iT@nra3l5m@33fZbo${X4PXGR)@!x0~asoi7-(3Uu1|P7+tXy=&aSjO-k^N!N z{#85kR>OHWGpcEvy$}RD=?Fd&gx;}Qh3K?PA?(nbD}!3TvuQ6ZufY~o17DbC@5=Xs z&}kG2nKm*D1jx;jpDxu*8;#GYj%KmxHzIks6FK-n)~=fsJrdc3Am`au_dk&A&L_eK zvyv3@M=}nRAOZp9Zc<4;oU*2jc$q1n6;Oy{VBs2_o00^dcj}FPprXu6!WUFZ3fDhH z@{HS;+ApTbrL~j(L`JliE+N*2it9jsFImNx7r`hxJz85-?Ir?uvFeVK=GYzGI~Pr2 zj1^n6JrE}T4MGh(smn_u9w3GLBsnNnyrx|60!6Eq#Ci)!?>RQ~Mh`3*Ev&3vsYH97 zvx?fQtAdY1W(zF=^lh>K-Kw`DZ^dJ1_X$(9lT&xG@^X?F`5L}K=5Q=i#3~ZpGO$aC zBPv39fjTIISUjPcbJm|1oVX?cblI=1S| z&8%c@gYmei!6trJx}O z0B)8i$yx*myJfyZ9LlX2d;BSP?g=xt8jbF99Uhr(A@5!+ncg604;Zmwt7u2OI>qFz z@U3Z`19)RvA`IoB_7+>Bm*;$LVS!y#6g4EGJ5WSFSn@Id%Tx?ks|{v5N~-fbM9H)+ zr6Jg~y}G~|Mm)=hPC9^GGk5?p=H3|@AXGnX%2TvE*V;# z8;WJ4+nyHbDA~}DxtiG^R%1Mxq3kXDz*FU*xdg$54oQ{_cuS3~Z?G{~>t`gvx5kEq z)sMa!&Y`qnj|8EBNWskbSAAk*gd#Yd}06oU&qTXG6CZoO@BP|YdH{TsfBd}*qn&E-A&{!sJE>ze%Awb9&tIl zO)2^WEc~kHNzSsAmQ!~<^TDiCOF;PKx<=)5y;Skv}E z@7VZO$_M%#Y%B4e0{Vx0HOzU}s)#MAzzB22n|>AW!1x 
z`^Ul>h;Gjpx&U6(2^AxQuSiI^YUy^HTUpS9*s&&CKk;81G{Yw&Z2XixFBj|gvh3t{ z>4?G-EU3@>NTtf<)K}9GE^yl=TJf9yiI`>Gp|g}i(~sf5gc&rV(S4fDdfr+atv z?!s!3rjlk{riea6NUMA?;sQuoS&s5YSw`-nJ0@#8>Ap|S@Xdk=T&l5643`d@-y2!a z$RzF^+#QUI31`wt)5}dom2lG``P`2#^bBvEwkQn4l&Bj+8_h4Jrhdc+>@o_EYl>~1 zK6Sg=D7+PPdf>_Tl*$39Ji-!F%Kb}k!U^t`JGx*D()@<;LePB3Vsmdr&-{B1F z9i)G+&GLXc7@b&T9JZrJEn?Bao|{#4hPlJFxFvtFdts?Zx*gZ57L8HNiJHX1yk{br ztM5V(Ie{8^`E(3gu0$^0hMvMim&qXOH8)4%)}6OkE_?21T1kirL{`!}el2ZQ1r&d~ z`zMq^o_o-*4G)-|#rGWHG5QArU*kZCSGeN0Rzlh>FovVAOk!(3i4QoqB#Z z-UZRO#Hg#fvvmTA0wz(;{5_hiRzJS%OEO37;yV!gR!}7vqTd71yB(HfNmyJPcrP2Y zdh(hmqH9&}Kh*6Xu|j0`sP&r=dT%SqH3DJ=0|md3Y3uSraS89ueXZ;g{loDxit8zvGoc3ZNRgX$+i3!5UZ`QX6`p)sr$;h-> zRm~+C80kNN^ShU!xTM3Qt!qapfLKq#cG6so%eeY=^;=>##YG2VLVVdji zP+a_Kb{(o_>Jcy=<6ZKE3Y5#eZglUo)J{&1)IP%ln@4M}yzXOO^^X8?=$hw~yMsHb| z9*t|MABIin&xCWxEvjj`5wyC5GO92h1rsK$B#3>sc6!}Rm$Xb_k>_?rsn=rI{Pz^( ziPMVW#`IafDdfe|=rgAtXCGX;#Bn(lr1LuOSNOB7P;7Lh6MixlqresVq2F^}N)N5*pawH{|0Oi9tnN=rgf592bRE>=&|l7_ez)fHa%mizP2g$|aA4qh0=_wME= zNEt>1D`-NMC3#P?8DAk@%!+3U9jkJx`k6Xk>24&yrpO6Tv$?o(&9XsR51oVH{31D5 z*NEKx5)P%hiS`BqR-R)?dNI%<#g!M^aK^Z-#P6&IY-$!5p}W5Ri_ou3gqlws$l-Z1n%0J+6?Uk^`|jakTAF?TL?kRc}e zpYDU1(i}kYs@9yBh^;jUs=OD1>$VOL5;Xn#nZLCrML4&5%Tami_ z*Hjrs?97rIpn#r)C1GbyZAUKlzod3MdF>p9m#|2{YFb(s!83Qs>Q?ZXsbkODTekT} zv-9-cnwf{IMhCax%_L0ZP3-zFQ5vPgnNg^-swYXsom}F}zLos%dl`|3I!j4k+NDL) zAhaTuS!3QEjp?}AFHKsUk7Q2c6O7aQy8H(_^a`4xKyu#zS zBY!vBQ&Cp~TgZ;I703OlbRG`rJMwhl`}qBlZw|XdQDN!7aaAwRW+tyqG@-gS1me%9cV3NNUCh|@dF~| zm{N-SnIt&-(WzTK(9#2ATEHuuu6vWr47QP!i2R_Oh~!$M;eJ+OW5-(;VAG>%s)iSk z7d>a$1Mp0aQ$+pAM80*s{DtS6x#&NS#J%F;;ptPM6l{pwYnj;FI9ClR1bRj7ov#gY zKqKeGf^0lNOpVhYjPKU#X)zr>O*Sr?#g5w{ro&O|XygC}L; zR4Uf%{)nXD?0}5lV{kU_&Vn=K#o!BxSAkyXp#ItzsW;kpjUvw^HC)eA8*vcO<$Thb znadn(2nShEXx2@^?0UfFfRixxLJkYKfUirKhz*u`6ddZAJp_A zJ%!g&+0HzD0Af_0IYbO!#& z%$4e|W*~_0$aiCcPAmnnZ}s2~DiDfHm;cQAd=o3LLOM?~I&$hvY!k4t8^n9>+*G_Sp4fy+ zw&}IR{%JEfP(q6mVilhlvXmbaVBr=Rm*(ZAkkxz%0I02$Fko+0L=Q^VL5Tv=H;ZQ4 
zP-R%$kSeKL@<2o3Pubftt$4~nj=Ijy$>ZvC`@u|d;*rEYhPSNZle8`m{Ye9|z-aK1 z(&o_g$G8Rh3Y}ekz;gl!85I3v@x88+E$s3ftvei45$R-NB9}_kIY&3}412QAxzO#1 zHf(w)TqbYeFXxrzXf}YlmfGBg44h~x%mGk+WPRtXk0x+9OAT+~%4yQ5*jckK5M7(> zT-Nx|^EYK`PLgB29v}#XD&vbjb{4M%u$H^_Q-I)b_M)9ZVYOd{DT&kD;J$mJcOUHK z)Ph@|yhqv<0mx7ZauR0x+u@{?)cz01-ONZn6dtEJ;Q->keHt7Ztto=A&+B)bydMjW0Su(FF`B2+!k zG*?+yuX5u;Mq(PL-eh^u^Ek=px;-Kmluw|Whs6x>NA+o^%_K8Ekql;T(3!>>{jIz( zxDFWny76qB_m>i>lVclW4Oi`z5U<4k=kDrB( zxgi?+*gSQKLa(Q)jiG91`e_C?0SDh@AD;^L6GC2U-H!BBU$+Li)1N2u5o*J54rba; zr(389!(q~S*@xH@B#vF8AI5yIBnh>-N>z^5si=t&ieZ`XggoK>|%gUVD;Cjv@0~Jgsg^0(?*XgoKL4qL5uac;9(vAS?deh*r z3o(fSfEOZNWt-uCj^Z%>kSC=Ad|so%I|K!cWV5tGgb1qZo|@H0Y%8g9HRAR6ks(Ci z_;$%N%|qMW6SUhv*ihaI|9&HF6~{}S)E+Z!tvy*Hm7CX1gpm97W;{75{eIQt674D;;>E`; zL7Z;f{%59b=z{x{lqahAp$l$_;6MBuAN0ji_+ri6Kp-6Q)K@VpV|||>Nv~v5eE7YA zi0Q+?kP8j58w-X84yP_U1S>Z0Fnq|D{_2gJS@_I)q zMx7gvYLlfvBYndakbdLLXh*N4`07WP}9)FQ7u8z!l4weM`XD`bAbY%tw7+( zn2D;LoJ%B$$he#qF67cDE|Ws$yg;C08Sojy<9HU1QD4I);;Vc}}hJwF9qj1LZ9fwT6HWtGJ*CEKqw4AM&j3Ups}CXJ$%>1l+r zbPV)If=C}WuWP8o{OuL?2Lp7_4o$Bn>YFGG4dm@Fn(1|Wkr(L31UQBkPTdu5r;a`{ z@+enZLA?Sy4d%j^TKW?paUXW3nRWBHhR#eX7%a6Yh$__PKDdY|C>Z|h1zUqXIbR$v zCzQG~UJsJVqYjQO_-}|2NDbCD-Npvz&uj|vivT$wdnyv%D4(79MUW|Jfo}H7ecBr`%p&U5QB^X_Rs!U zt70^`XsX~(k(1~d(a|r1FRXX+0RK7X-DoEeKDU0RwL*v{_bV#4jql(K9J?2AZ@r>( zYwy?`$tpR~VNaan152bg(6v!^FMYl-wRdYqa-(1ToqK{)`%!vbf{+GNn_K3B7SXPJ z92k=c*F`qD%b3Dr7HF@`mw=0?>0Wc$c-*eI#08Vmb&5ME0^$zlkQ9H@wBH9~R+NYBp0YzK^gcb0YIGvIsFNcT*L(L4nE zv*3kB1dc=II8)T7`ufBseRPWfm_mrV1|1(xCK4Qsi|)y5HmJfnA& zB=A`JpdsW&`tHx->F;}!ktVx%93yC1I;SL3AV7`GZE{#5`!h##lcj?$IzQo7XE%O} z!^+}fp7BB!^Z)a(&enjqj)U5P4R6r#h4scqsJHIZG^iaj-KKx1 z)uxD8wr2DgEa$8hzwNqlL`Os!_jx_;FQAj;3?BfY$_A1Jt_~LJdMfriV5dL7;X6n4^6X!vB zVX0mWQUJOHbY`*fUt+GItt(9RQYs9dhRD#wKH;`0o#6hHg5Pgt3UlnrlB~SPP~`uU zxsSYKKEnKNbUl>KTtbDxf8gbE7I9=JG54<_g{f*cmyp^ zpkmc}0Gyix7Av%8&6V@%)FsSI@e=#qrin1W$W%FvUR3;c@<8FrW_di^*05zR4k=X6 z@PxPuu>xeF>Ra#sIo)RO1kp9;>L*!NiG+CCq5JW@#Q 
zGm}XVSrOkOZ2Ryd_{zLE|J_=250ata!1>${Ai)%Gk7Lf;BK=K9=Y(2^P1jVg`BTwj zD9uePxK+K5sTpUb^d46Vjk;K6K?T;=9MpWJ98-88){NEz8#5uk`fcPXia&0&NJ*po@Tz9A4<&ALNrTgHS(4Rv;a(>hre;AWx0{i_C@BLgByc3p1tJcLKey? z*BUnB{4jckz1kf`#@1?4B*uOZ+6e?QIOLNNeq*WNLefg6;ueoW+IAkRF-_rcn{30X zqV4cUv;+-fhIgY@nRmQCini?z{G*>4;wlWq_ZseQ06$VUC|1D+x|eE?x@u}^NYD@W(Ol-9<9W1NE`c;m%Ar>btS!ZXo#H?_Ha677<6ra&49Hv2({-Nf+(yku5`| z2c{WQ*p?SM7i;uyE2ux9Tqq=C^fid9_dQ*IL?DLTzg0^fAI)&9woP5Xxs01qvRYTZ z79vx($6&x+TTh~IbM%O)F$X~6(ym11=5q|qRZq2&7=^gFGwxQLnwuG+gU6Q0(ah?m)Lq}Qn>Si_4CE^T!q~fQw zL!g45ikTeB*F~{<#_MusIy6Qna2ki{B3Do5@%m&(86xz}f6;<$7}`5alZDtKKFp?< zCA&OtOTFgs2JDeSb;V^kg3irnOBz>}yvc?Vmf=hARa<+HtLuHQ|--=-Bi( zy{2ej|CqK;i_4(?1Lj14=^wTy;*SE?07K4aSDXLuQ=nnKC;JZzJ1tR^k+57^61G%b zh70vTcyXuu(h4&7B{#W2)YtgJlmO12N2z9ZjmIRcg=s=p5KZb+Z1?!gb2KDpD#a7| zAO)NlLO2*Q{W&Xx*!o7)Sz!E zZMAF+YrYkn`&s6X_%`r>3L1^na!_W1DtUI$QCJ%>qe_D`g8-6_fe`y-j(}H+3 z!6>7wi;lYOo}kHOFh64p%!U2q$u=@IK99AoPJg*~#Dr+V&2uQ)*B*7r50rLLR?RSQ zkZYYdtYVDFq&TRl?o7Ca@z$bUqr5Tp<*tIzyi-bYqLx5DH-uEOX}YXjJy2RuXj7f+ zmoM0DoW?+HJVBboqWh-f5%zMgI}z!7I~oefpl#PM=W6{g;RSiH%MZxZQQN)BFmdzp zZCZ}=a>ZXnUbdz3kMFnz0oA$tX~{&MKl65M)iSZH!rKUk*Q%^1S(yWgg&t^ zN|P+r+wqkF>i)6>ncqQaGyP=RZgF3PGP#L*KKxG-zrATY?z?GR%X1)`778OPsvi3Qm)0!;^F;9c8q zfC0_$nPbK9*xVboKYpUr;A>PVpv$-ELLPolJ|czY1l1EB%=Xs#mQ=qI$%EJPFeC91 zVh$T%td>ANRnAF)1C|pd4L(PQw*mMGstbJ`ot5NgjAZ8w2yeM5_8$WXMA;B1(C~!RVCJZ<(}G@ zo(}>s_~1C9O1Vcudf-PZb>53Pm`LmkcMZ)64@5_^m7clTkgETz`a5~BknisjmNbw& zhmRgX%-;kBGhf=(8aIWOJc^W~JnK9Z?63T#%?n$IMJDUj35Ox>~D&UC2UY8z8 z+Bx_9gIS{#1+YtI%vGkj8qX7Xpx^2Hy#f$$Fe@*}G7;34WLVuc4FwYn4C1%|2v~es}>IL@&i;C z$Zsy^M}WUY19BSZCp?=?_g`C(%v(=YiBRRHc%Up7+)ls%ML@d0?Nh8{zZpp$RSCL2 zrR$YnL(5@A>m(QDj|P3S>&haBo3*_IP-=^#=&z<(E`o*VkY6 z41=O)^|cb(AtS|W(<7M`d}fc^ff}fatozs$N`?PKvH*er2PW;p#sEVQUm)*Fwb{I7 zzd8c8^?&-ON6MLive)1ebi}|FGT%#$xA5X>k*rGN;cwQ0AhLwf8f^0V{Z*`njb3)C z>2F8i-^3;Yqo%v<{Zu%ARFP2P`#hyUzzDLdfgvIYJ~rX_t!m6{mff0mNqBF-c2&aG@uS1tf!+Mg%~cYCIF=*w`Kwkc|aL>8ygb054u;r05@F6=4z*h8xyMPrw-b;@SH 
zwYJt9+db5Y*B7ih8Ma44b9Nj#4oc`is~bkJ1@m^t!I5vX!c{WEX_^PH zC_stQTvr}o!QQCa$Hr19ZmCM6Tp{OeFQ-Lq>-y3tlf6S4MwHU|o$(9*L4zr7 z(yhuE+fD=;iq|XE5UhRh1d}`kYVJPxt^_J~pLsBPT7$_?q!M=;NnIjTw zqUF!D2}Z#y_Cex9{FwnL?w~_f^yW+vx{KuAG5wNdtV3)!tq_20p=%*@10i13-SzS9 z79GI2(&&1fQ3t6Zo8?Vn-!d7edbi4WTur6G5dI-0ktzRUHPOn|T+c-8ICdo+BJ}K6=$)jG)l$W0=O{o=xy(8`EvJ9gT4B!s}y@P$& zaZwS3NtJqCI!RK-)jHLQMu7QmqK0ki9S539rI1tK8Zi%YFK1Jek#F$N=81i z?0!M!niM+N#^=VJVGSUQ4RL!!%Fhe1pWzTqI!2MG2sXEI$#-C~gWKxo44ZNTJ3GjTb%pq*96=;O!PIE7(H$I~s#rr@5{SKA zrF-|2L#DQo_r$mURNMuK6ud-_{Rslrj1o6{>gz^eQflE`1uMLbX8uGPW;3s~Z@qy; z+-|N_&!}kKj{Vi%)pM1(g>$*`+?_0GK{ZH4^N;)EhB%={M0^eE9kw$yM!_|webGab z@FlTM*Y@t(HfTUF43|auGV=Y&yRD7MZSoX8LE+0aI68wJZJ*R@c1Gam8?NuzbCBY} zNE8JB9xY}lI)q8qKHEenP2*Is)u%rTQ!g8_Y5}0$mm}V`b5gZcAw(vy$$ckz&?gVY z;c%tFMzmYE+^m}~5B|_;a%GbPbseT-%33{$Cw`#`&HY`06M2udcWVH`d91jj>0~J( zY5uT2?@1!J(2L8+X4w+1n9fBQ)NPnw@2&oO!+s|F{EabTyprM`CvA2U4ag7iu_@Et3j8v)ie&)Sfp2+rN z_sk-TAh_m==cJI3+TSWgJVU{~C$+pyKZ|(!iL29uu|Igi?TCXKg~RqeO{ugm{jW)Q z2Hj-z?-;==en5|to2tw*PIDl~ocF*`%kGd_Kxfd(MzeA`TI9E#fWxkd$=L2sXB=}F zwm4Pm+;U6HGY^NpvjOrrcTOhjQ~+ZyNYElpLnEeW2!?rAqJ^)l6DnI1VJ`&kafiYo zWPg5|$ygTLE+e)ltYtp=6W6vu@$zsv-=>@#iqqfdfeHWH7Na7j1}a7&G)@oBpvFjJ zjI`ArG{;R_RK&_2Re8IY`a;a=mS{c=aQG5wzn>Qf!`9OY<_LHTlUkZgrriT7Q7Lj&~a0hEmf7stRX~Ch6FO@5#MI!ss_^*W0kv%;Y`su zmx8fio!uo+PDpf7VGgQOptXQx)tjs?4- z2QI4hSpX>!t{c73ss~s9;PM+7M}JcS+1v)~lt?8XdrAF@GH!I^KG#I}z|(KEf+h%} z(_ik`0O24&cMs7y#8vBy()X{L7Qw6Y`~^FDe@yC>+@die44ucsuyA}+qa1j-je9|F z5|jwjd3<+zJgxU{n&kS)(+X*Pmg+-Vl-_OiKb)x-^?GRe#u;^|Od7 z+DqsyI;}LTqt8!c$AQn5y|LaoKb>NQDT3u&5INu__JcppTmd>02ekABqfm$O1avzQ z?Qaz4`LXS)ldQd~BZGjcGm-fQXxKBVq^0vjd`}9S8}K(((4~KB;jftIKhhLt`f(7a z-7xAtX{YF=GEV&mjHbLO4;o|(8f>XCc^uZ0Q3TuqEE!J;hHpH$UN`7Ddf=$o^FPHQ zX+DqS6b#t+^V`nz1xJBctj7bN_>eK;;WJ3M-{0NY3QTU+7gW#}53K#_L>h2;ge9IC z|FeVG3G9>k?&IhWe{AplD<6o_Tlq&2 ziRs;cBm2OD(1D1v+s^13Pi3)=aG1Zhxx`Jw`?K)#EpYfu`uuY@$pAeDeON0)f7bXM zTL>-uSrq=Y_Hg8?V7S(FNG?Wg?8H3QwhKKL%w%UXyd=XLUdhmqEP<^`YdQ?hJ6G}q 
zhylrF%@4|66T~wa6`8Y>;ezgT`&0CMt^9ZF^OK6ep9gF^ zUsF&rzx)IT^;Xz)ak@69X-X$%NYnX@}r<6cpn>%;h(MxQYcMR zXHy&bkx;QLfh_TJm)Uaz2k%8wU){z}M5$Z|0nbFeK)s5TN1^NnaI`{=XAB@rU|0rx zDT`9MBlG?>hsPHj=vNU5Kk$;gwbH6;H*Lw!K2s7e5dQM0^vN0`8VM5xoC^paj_D<$ zQmn5EC0*%gBh05RRIlhoV{X-SX1lqrs`~*NCo7aqORUtB1LZ_vFwMh*oH)?|@(ZtvhE8^(LKJC2iy^OAqWObIqMUetDp&8*F(?d-ZK<2+} zZO1SxG&$J26daP?G{A@U>9G(VXgo&=&z9Y}qr9W0%!Ac{8-;K(3FFXT{_y+i86+9- zK8o9v!Ic~OMt90^f6WJZbA5Ii?l+~zrw(T4CPlxvOTw{h&>dLwm`J5xui=l}I&of*w;|F&E&h2M`CYlt&LUYJ zU>AFSbM6z(2{jCv;NPjkO;UAd>htgJVxc=kjl_tEtQ4d`n>vpNXb5)jqf3qW`1mpr z)?h>ib5jtr>)$JvhuzDX4E9%)`&9)5 zxgF}{QOIY0Z#ZMsHIUVzo4)d$(``b)xo;j@F)S1j#-~J=gkgkPW1kH2)S5@4SbT9( z3{e{3fnO1p*t2~qZa)53srFi4lz^-%8v>#j_P|FyaZ^Qts6Pr0xXyxh^p+g@+G5?B z8`{@=g4Np;RxSo?qfSL4HkR;&H?#hAKvZz9NFS1tBv~CcJ3<0cSDHuYG4x`#MR-qv z^tLmn4eLVq(120hO8fwGcvqTHKYY9vb914kg$|QEQ2`tkK_So)JW$;QVNcf_EP=Z=KC&iAGo zu^P%z6cz6ccXE_krz#5R=*@yPgvw@)IG}rYroEtRSAyv1P{?BJY7nXfiqZM6UNUrd zICSD&h~R>S)bAU0b{updm|0(#a?7pEG-~VNovB#b@t*~(gI?wq!fFaig3y#)Wi^8E zpoDL*C2;dFSvRy4xHM_kL0Zld(Y$C(0%b;)>ph!o=;wnio#<5KXcH9FeTH(28jgfIshK8@XmkXRX& z;Ond;09e6#T>S`gI`9RWy7s1F4rKkGi0V}%@&*wLkI+f92J80@4279x^q+5=`0Hot zI$=qmHE(jrUji#-D?hM@;q%?7aR3?1nQ)GD@9GK^F`;ZSXX6(!Y_IrT(=~*> zpsUg?QZhq61JF8`ns3(r(j5fCxSHU(7HWnQUzB}Wfg{h{)nD}7kLg!&)=r6UL}|${ z%p9S6++F%f*b=+!ax9R_aFSLrwrnXqx>(Gb8H~pw!pLuj8?3bXf7;$1RjB0rTru}7 ztIq3DGqGljs3FFOS8^wb-A?KUky-Iz71jZ4>^j&MVSx zS}4TL4wX|6G1XR4e&b%>2wIN7-ITAi&{2r?i%~thsqEpo@OO9pOi=#fO^;!@tFWJP zj#Pyy+bN>o4OL*(Tp1s_f4Ij!-bX|@N&W0b40zGzg(gZQemNrrdR+!Wsf6v_(GQF~ zEaRz}<~oWh#jM+DjdO(>hd3o!4%Ro%-wY;33;4VoAmF8GWy2nNJ|T^KKMhYq>g^}l z-DK{gODo*T&xmL}5N2^*)u)g;#v)XnOwN^g$(tmChX&*@MnEEbEGR`O0ocPx9Qu9# zq1wLjG=={Bd?}M$H8vLxhI^WZDEHSMv7Eq(RMUfU&a3N}KilhUP-bz}=N^P8AmXpX zo1BK*OOB`uCi7e5@hm=&%%O{s)upHLqU|H7gf?wX?7h*VvwX`pBqww%=A z7xY3rvJ9N2w2eQ(EL;%Ap0!lEa29B;ItKT^)SX0>_1*Q+sTg(Ax9MQ#78pm4{S>Y( zL*d*}l$HEfRoH?ob$coh(d<=hCBy)$8v4EN}c&U{|iS8N%=M z$Cctx)oB}CPqv6IJ()0iSYuMQvcIkcWM4(Vu*Q$a(mhi<*;Z6qf9gH7JJO)Afrt14 
z)YrDWw|F3Wee^7<4`(m#T5&~zS=6T|R2wI?bvMri{F}BKx!h*G*Bp&>DvuiEO}&<{ zfO_Vp4q)TEF_RU2Ugo{c)onc4$q)l@0fFFsIs|LYoVG{|5tlKAreZ{ zsC{t_o<{8AQes~p*2}P;=sQ6n&G3PWaRH}kbbWF-)u)y_Z=b#ISie0li5^IIimNSS;cB`@>_+yu2X@tf_zcC zA1a2|nmK~T!8{Zs?q)vSlDlFl6dZ3e%ultFzZZ7cw?mdX!se>EzshTkQSfRzo zcP2Fym(^7SVBH)JK^f3J{USw~HD|q*xq}go!J&KEn^1}Z8zX(H)EAe(uXFX=j4ZC( zTd7HjrHS zW`M;~fF$1ybWo`B`j}mQHMIakvKLNP0KCzejzr0US$Llob9Bz~-*LI>d{Q?!ux z6s6E06$~pcx*Shvg*>HiteAB8y9qX<@DzSDI>-_9SbWQ?S|{60>{Ms_osDzaor1%6 za^Q#jvmkJufCOdt6tbfj@gtb30^1AAF!|<57UilrwE$ljHj^HOh*%z9XuI)VS6axz znI;bMJH0<%Z)$(dHc&1-oiunb8pY3~hoZg-#AFmxyuF45Zh5}ZH`*Qa772gGmOZed z!soOtv)0=J!;^KY+dC?%`v$!1_Jw~GzM>%S62%$$K&MXoqH&nB`eV3mFqC|m#gV=5 zVOFE=vt4%nKYxBkZt=-ptxPU<)OG!j#Z4`%RFk^}(6|j5(5@kUCWtq-FoLTyt`H~S=j_i&kiV$z5r*w5ycB?XG#f^O!rnUENiDKBLTmGeEJPBIUO za<_b|t=mZ-Le?D2V~GH=#@Srnu&LrUi_}8htYCENu>dfI9XF$PEl=lnn{LdWj*bBx zwiFmA4uQ&b$=|o}z%d#>{x>i*@@f~sPFEFs07Q0RsgK&z?Tk_9ugEqf{-D@WRC{*O zS+CRCnw!T;&0%nS#)mEJ5L2%;hCqWtX09U@%akFC;5#RtmkCr%daKeH@@0{LH$5S1`X? zhkJZVdnrqOUQx~=hgJF_^?3vy8pL5IpT%Gl1o2xI#FW`fy=IJXmHj-&GsucyF~Fx7 z*R9);Q#+TQcSucDLeZh#xLqrGm+sHDOa%bNSmQmcKfYMpVr_AUqy82el26C!H8t`a zKLW5ZfYSPjWQ$vt65tp86>#YRcmdi`Y88;HameJxA{Fw%_e=2Ye%WJMEq^;Zb4J`7 z0~RQ%!ZfJPQm0x>7qNr%1p?($B94(W1Dj0*WXTZY60IeO3bo|2SnMT?u-&n{x^wnm zZo9TR8`)q5mUMZ)kax=TzrYfCbmXL;e4q(1xGAb(61xiJtko@+D>bd2o$T=ZMSnu? 
zU9rk@nUL_Jv`vHg7c`|PJY~L|MsRQ08kL7l6XT3WS|2&keoWyF@$*(#UMtX>C+OV6 zwCZuCsXzT-m7mP&&;nCn+|N&xcPu^Aca;ijcT-J|THr`tHttonm58yA@OEent-!bK zdB6j2(^4B2OD1z@yGM~^&~O^TKu1yUn!1uW=>_l(WFiMlJv=$f1g0$l#cwma}583`gZau!u+9`>>Z(*DG3`u{YY)y9g*XaW5ifPxD+=Eg6@w z$tTA98mAwB{GETJOoLG86ik36)9{$PeLc^0t-J_+NCD2J47J13qqA zTu(D-3qi*i)c0Qu;nEpqT5fDJ__1 z=1ud@IZx;<46BIjFvfcscTmOo4)MT=6JF}8#ao;iJ=~!g!?im~2_TJehSQjguWpfk z#J~S}j@dTR2tVbIBbx-{fo{IKx!szwGOamXz!LuTz-c3Ns{0mxY11C% z5PMq!5B}`;U+PAN5heeuNeTrqX%OKZ|9l-t>(}GBgU&%BBV!%{&;f+N*XyocV4AS+ zfF)h+xtojiFp_^JgY4oh`3an8B~_c;maPh|&_H#mTD6sn^NiUuXBU+kILg=&r!I?H z!jiB>Sty@Q6g!4E-r+ezJ+X~i8{@4R_oF0dOD9?0OQ!Tv<&T$2 zpmhiLtu@5vjDG`!N}S!f2l8gvjX6C0o1`i@2hA*f>O|mY;CG zc-K9%Z!Z!swuxdD7NH9Q!}e0Av@$n^qoQ81!;Rqv8kdL;*FS8qn6dEQ+DaGiS zX?0_p!$k~e5!fbcvAStQ4*C;703xF{?pOXLXq@5nM!#yi|nYdoL4HgpZn zGUwx1B?PmIA-bs!JT-^5)&y5jEdpcVeN?qHMrhr@(oWhBAfmqGSeqU}X7K1_jIW?J zOQ}RDd$Q4F^rT(VNLA#-T`JSKE6A7W5NHa*V<6N1oZcKRhB)zCk3{>09X{eRJaI7P zy|G-bW)hkkPQ{8h#p#G!odxE-iZh>autUF@vpvR`I}$86-4I?v4@=*Ve>8$E6>+q7 z^=F_H`2`N+LTev^omY3XkXcvIhB#~BN~KEz3MEFi`Z6^LU1DgsPiVz-MMaIq>OIBMo&gN@Jg1iEoa@;pM10oT%#cM&`~Gx5zSA)NZWaj%rk`>NaLasrd#S=?xG8V5EKh@ z6-TdRO54+}SrTZbTS*zjwJZ5T00^y%9J$^ojzHU(NA`^UzoX2fk+b#A4(=uv8>P+_ zs~~sIya5!TTCY$s$ko!i#har<7{+wwhNB_IkLEUcj=9jA%wYX$`(K$4PVSpVM#N<) zbZoP#7J{4`BfC`{PoCbKK8ccZhv5NE1ocUzdW$%!%Tf5~si*JpUb*g_Oq*YkVD@NCUISs;LU7${k!#T$}5sH8oKWGN)A-d{|#4 z7)}XAmlp6pT7>ge20~B}f-_QRhx-hYea~W%2&5dvW~ZEm>pO(MiKnc-;w4cb*qfbD zwkOJrhE1qarDq;bXuvgcN9U9WKMB3J7oh=L?yFSEtdr#+RSFt>qnnfO17)9%uDwXT zQ~e|5mh{+oZ>G6rZ=svQ$1A2m5*ym@N{yP~FkU(rpG2~KDVPx_fb7s58PAW}JZ~l` z6OhsLKnV`Yn%f`FK-4h_y+Fd@@HCcdJ-Z4h(IMIkB%pp`>E5%1nTva!Fc%_iI)^)0 zQsW@Y8U8_V-xQhX2hi6~VCIzOBgtDpwL`WizF|^nm3WO4Z|LLUV<19jy6*Ff1a&(J>m`ZQpP zB@hU>YU*s)J~ETIu?0NS_5Q!p7x;0%T)%FdY{!Z}kGp}vgB@z=Rl4A$0bE?Z7o2ym z0P#rG*N5JpfDs;D$2%~V82c%(%k2rku6B|8*Jv6I zzcw%yQHnsA;(w-pWF0hBd-}Op*b)twG(8oQBLjH{ooa;EAy4aZJYMim`_|3FCZSCc)gC7qfeV3t>#d4~e&871zrhov?-}FVgfnwZIl>YUJU|B#;+(QUm zYdWhFAVH-(9BR0GBpqxa-$GHZka8={TOGZ0(y~m8nM_Y2Xw5=#; 
zBd6$wZr4!%G<I06aVVsHUPXBUogs+y@dNKUsxnc$T%2~r1A_;pC`Aw{BLs5gB z5^Y-|NEki(#jiA~0kZSOZ79$TDU`^O<^zunG?vvquz+kG*hzC7h)j*!Pt%z>Y~_|) z_{mS9=eh8{yZ+TjdIxT;!9;4QF!x(yZ_gXu(?Ezm%B*YLs>?!W;a#&2K(gX*Yf%*y z$>Pl?U+(X|h5O9Jo)X$(@48cBsxhO|Q*Vf@1cy%H$(zPGqtJB-K+K}nt>qg-KiKq3 zZpYS|0)yHj%I$qR<{n3y%jhd&uI0rDf3oXzL8XYc1f3BeaO)@z&4;a=+T;rpF@iL0 zj=PUo%xyp|ez`<9P0@PSB|vIvnEHe%xn4GbgVvZH44;(_NTH-`Gu2zyW~h6_2f z{5uO?F0(ZwYBCX?!O)b({fDmFP~S@bX0rP$cp|p0Njx-yhRd{2!S>$W`NZhrRvUa? z7NL4Dp2>7fb`yJq*(lHHHP_%~reUG|Z)9onx=$$#tO$>DkBd+S7E%tjN0mud_Y{ui zgL(yamfAh<+YQ9(2Ih@me_M28Ifs{wsIcgpw*?|h2yr=pTj>arLdd%WPKaU$OZu;L zu_-#2KS;E$dAUM**AAfXs2eMi&cB|bve#-W?x#|dW&uhxx6WBw_d;9T@R6QaEp+wS zCCj6ohQi!jX&?4|9JcKds#4RdAd-uz4)P!Fs4qNSGS<9TelM#wLu4kr)0UJ6SpL+8 zy&URTC)6Sp+RpYelOodh7Yp?6oTVZ268Ieim+tP#kzn>pD51=A)@rJH2X4hjNmYbZ zNvP|ke~YfYmpkakuV)bbT#&d(_Q$~xbNO+NsPVx+YI|j)eY+l&Sem?^%f=wrEP}$h zO{}(S1Eb>O|E;(7DVRO`$o*9yMI|omkjqG;Cg35C4P=&G5Q$mn>DQAm{8V|1QK-q~ z5XlmVTJV7H0*y8>i*ibkJ^J(~#0l+cgPW6X)a+ssk*n zC}HDn4pVMdA^3L327^Lkgb*bliFrktgi)bM-zu02Hldclz~wB_^dxOq}hnZNiCa6xT(- zm29ptqkc$}l#*!~nQ=3+thJ{1IrrZDyq#n2v=xQi+^$mJ!pw`TTG5`11eO z>w;mmg>Pu4$M%7vw;yO#rFk#TNyVL2WDj=r$#F;~=FJq#d>sN(omlOXi-(%86(Y_0 zWKmpNOFZQHo4gvLRASQUzHz-!2V3Ckk@~EPNqxx02mmhhG(;v3c(_Gax3BC*jgwTl zZ~q|;>IJWY0H$w3qm%(E$~DX>L_+{-^_}R9#m{QVHG$BO{IoR?dhG3x8M9wyM%pcC zvT}5H0*l?tM(W+M9In~Wp*2BGq4@MSH}9SK-}bJmutW|;R^7JkHfEr<5!BMUn5O>4 z9Kf{UX#K)L4o3_5O~)dYC;dCH)NFS(1!r6))2n+wj$}FX>yh^N=yEr^zr8^Dn0;)d zYS-bS(FvZpu-Kh*@3ueTX&~MngYJ6PCUf_D0w}ACt|K>sA%_>2ja zds#Rp-}uLs24SFINmYI0M*I&Dy$RZl!&&iTLdi$BbZhgB|3j~~mC{|;F>uYgGG7{s zLJ~&A`(Kv;<+AFtPrClfm^>{vu60&AK`iy|DxAJ2+`VR~U)~kMNtoM)8cY8-b_CA3 z%DzDM$|;VV_rPsLqLu|eaSL_D#q1Mt-Yqe|xVH42P`aPn<-?_CYrKXk1*pIl?yz+H zpp@k_(ZUh|W0h=HC3Z0Jrl)j{W%0x8Wq*hum|NqTbm|t(7jYXg5+nD8LhM@$r9FK7 zK&>KP(36OD*amf<_3&0$D}tvWt2+tEz4rOf)JGY0yJ*GqZE6;13|64PVggZ4TeaH| z-q&wIoBN`dry;ad8RMp#swm3!gMUN<6WOYq*|%1{WV|}F7MA;)htAP)mRF7>cvt-w z2?R5(^GuKr%Un@eiuZj^p7b&eAqM)SS`n%ko##ns+JXB*p9$o)*JCwHe%H>J0soGn$-CNs(GSLDz>8^rm}kF9zib-&HaNw&Ux 
z8d$qPfywTK31uPzFD1=;#%%ipN^Mf}ZJO$@p3w^`Kv^|dI}Q~)M!hM{5(G?GUOC#txZz4VEA zu})&P=(*UGl!6aGN+Dw3Oqo2P=tU z#)3w$Sa_ZZgI%UjmgE-#UI|Ui*6y`wxh?;qr#tIPvaqD9TtW+XfL)nSokG3`esS*# zT`hmCgnEbfGQ6)GZk4XCk$#3LNbH#7e)!)t$KF%j)TeG$v>kvWz- zgk2{SA2ZsA{~bb@T9Sj20=yVKdJ19#^gcdD%iEkq4Jk$VroF=|T)|x*UW$ivkWvx9 zTkx}wp|k6|;hL!;~xVDlEyuZza{_SD=Po0}TxEbE>L3+0M)pD-19`&vd13 zxp4CTVQ_m+_2oQNk#iu~vu5jwXL|iSucDifKC?cZc+K z9?xYdd)2Ge)5bBJfy09S*B?W(*CRN(nQ6d>fW1>ftPx1t9vm%%n_&+N2l$xShPJGC z88Jdoj+-m^*DE}TE}nZ%D#6HINMS)&yBen@D{gq(6oDfI!d9|e&(N0He=f(S3zTvM zKO8xhgi6`a=xEdl<);ug&m!z)UX{H`7wd^G!0Uwg__x@OB~1=RDct`mQrLAZK^*gO z1Og|VmN6XRxX=6D0{|e-*=g^TY?dUxSdfnq(1uhOgW}6h-l>FYbjC87fi9L;ipQE= zjyq`hk;r%|FfLfR^xYOb_S(u@K@Fog#k`$SOOECEh@LB&tg+SZPsKph=Sd42CONw4 z3yYV~AFU^Jm0pVkNh^%faS}1KQ!n=o71?IlJ9kkP?RMD z(|iW6ph0LXY{>rOP;+CSTWG~WUddlb>e9|KrjwVW-}*@q0`g5EM2~pQT@%|i{`oya zS{E9ILHx=*(%mF19Mhm!;T(ou)AsKj#$I{8;K-aT($Vbc$u}Jijb0l#o5|Irk~DFE zjb`$Qmv>NvzF^YiH?(!2{JBg)vbHhF92>DiRC|!ermYSuqa_LaxI^j|BpV+nG20r& z50YR{$RrbrG3o<8ojG3JP)BA}(C%~pHYj3aTebOuiZ@R90x}4p&?Y7UZhsMlKMx{m z*_ROL2Pmsbu__VqV;2#;0U;c*EP;FT2B9n&fXsHn8+nGsyklSN{R?4yfH;3raS?|a z=2y=t)R0`5yZG>lbUA!dO9>b`#CVijg7KFX%e;7~$AsH@#E*(9M~4lYkz{OVfZ`^iFxIv0jS2_1t@2FEVs~>1SLSg0_M!Ob^FP60OnsHkpc#Gg)p77W6~@L^ zx!T!ohmt9N={;L#d_(!g%g5+jB{xZ$$PbCbn48us9c`BbT+8HwMNVaAS*3W_8jIm3 zSPubD;Q2X`nIOP(R^UZoAyhl#0q6Ql*zj8~nWL!4DRxE=(y>L%FysB!I$$KEXEhr3-l zdCoRQm0{3$S!Nu_3`<_d*O=r{7R@uMKfaZ5HNFK6iY`arz#Ig7&`g-%GAG+ZFibwA zOlY({SuOx%)05-I(?TeD@vlCuDYyrj$&-tmcvI6kl^R}vi_BQ@B$2?@xUv6T#Z?xP zYK10Y{+s*5QznVa3hFQ=hGNB5^VIwm6DyT~z+cQjHvW?Di;4r4R6y8&%-$Q}OLfq@ zDcT}q2pNiG^R?pQ9pzQEi>hy8bawPFg75_{#|N95gzkr#);NVC1yy$F8#@Xq^Kiic z#}*hmYCyn`4nT8&S%w+)peN>1(!9HkMVBtoF6Vlm6EPOKk#QVFRiWTIlo2MtVt7oI zB;$*AfNT&XQZjPk=i=f0Y$j)_;@N(@H2THaabH^5a5n zj#VIFUzCK)!g%W9vZe$T-9q>hS$S6WcLdmbKGk$9-WKQq`v#U1;b%AyM69O4EM0M; zKffDcj=4eoAp6ubhu}S@n9*|P0#O)n+GVBr`&+RV-up`#IHT^E5PWH${>WC<yRj(A2S%(toH_ai3y0K1~IDSLGc z?s^=1wayVYqr08`(+1@JmsXM7FyoyFL)jr;OsB5*B9sTP5qMY?$T>KuPrN7lII}pW 
zAX*iBlLGJ%A%N6Zea2@D4UpK4185Y?n2n1hMa>^RoE|L4UGy_4FTk2N%ZEYk9eRxR zLCQ9oXs(3k|8eP%Cz@*%H5%U#olz>{zTr9^8-BdMcG?R&(iI3j`vi#_-mRA$c~x6! zRmydPxselxTA3psc)HbO9JUV%XX@`jPzAMUVsccaEZhNMpt(LQY^)SORNKAv6zgQp zkFMhW!{UibnYewqo}VXoGxFbG+froIYDGP1D?hxElZi^lE&`C&PE|hM^&PLJ3Oc)g z=tEPr8rHok%S+fK^`>N`ms;e~z8GMS6;s;jjC;3erV=t?3nH#B(ASpY%I$m1{Y1=3 z`BhL7WY(~Np>E8h%h{!4><1&&GK(;W3Miq(zR9|XT_kmRHT12M1c@T$y4=Lf_o2Y6 zZ-clvmNP#8XQr^*-8{UwEp-H*K?-2G#JVRaTu`liB=Oy)1>H2s z$x~?v-qg%r$d4V7Nf*Y0z!YjY^~l-tg=lJsu_on9V5l++BD!t?`eGOJ8bdYo6{PkR zlNWUtJYS7;LZAZKT2+MeufX`ZmmQiwZt~R z`y}}JgLP$)h~&E>Lx~JFKU1A2ce>v?j!lQBdx}_EW zOvg4g2g4g!#t4QQRS+fP)z#e6Ls(dP_J#QRh447I!(T^od>oMHP;rZ=N4SdIi@EDA3|+GFV>^V`y$1DJ5I|)SqHZkA7Bvd!I(B zX1l%|8F=a-5KJVBH5sKQg6Wf!*oW>UL8X9wr2Tqq%)fV5j^Sb5lR(a(i;?BQzJ1^` zne{TBP*J3<8WnF)3^qJ}jw$sATcJ|S zPPa-_a_RuhD})7@8ErZTP-eE_Em()*|4^pLtoq6NDrk#lG401vxrkAM;vQlk@H*bH z!ENc!k3i6PV5SY(1ti5a{KH-qxYLstJsJ@A29YnKJnU-AOeJJ1r6~9T@4?8jd7uPK zpzHr~jmT52ZfCIg_TI**Yj5HJ-ER|%L=Uw(jqtR`!DKwZ=mJw6_-;gh2i~fE>-DGu z-#ZqWKtzC-D-;a`e9bvWF$RlhuhrCRUyY`aWp5zcpa@ z=t90W)7H4EFW%U`bqfSp3@VD$esGff+y@(=g*J8Q(6s7@{4Lcs`GuI|R@$#&IX1CN zU;_)ePZQrqaE6msDE`euFb%{SmiMyYsrc&9DxP8lAPz%Q4NT37*0e>ZYP02Ez2fEm z^-!OJQ5~m)L74IO-r2=Ao-O~U((9YQxIq>H{WI^>pEVpwCbB0i^$}zeEwce0X?nW? z9uBDeuRaZKlK9PM78dqx#}c1txr=uwaz(w|5gaf~{kB;C0KRNis|Bk+!G6&&>}7pc zpfAH2{|*_JgN`sMWoJURHa}>`b2d~^;?Wfo5_EKDV&X4bebMxyN+ou`s6Ma&q;Ap; zFnZPStKSVD+D?;l#i%f{l|u9r&1&dH1mKN7CGT}xLynZ#NA0i)!BEqfe|J~Rq?wanXN#XPYfek{#=|U@-#UBzqfAkb_c0A7Bw(f z3|^ryWdEamSzK7Zvwj{Kmu=IZnot9tD;aJ>F(`0MTwLyI3Eh>+Vr={y*S`2y7-lm! 
zEe}n%*S$!eJ`d=29k(*Mx;=pKZT`7!GyUjRr64mD;QE_8C50Brm~eU02c0040i+5~Pj=VSd)sMo3W-R)8Yz@BmvT0ei$Uy^ zV)v1kurc{x`b`awLdJA>XN7qtZ>5A)RQR;kN!*`cx#5(Og7!8 z^3P4A3_@9~E5G`{*5Y7$vubi74@Tn>9XYGh-zD$VhE)M#YpKV)Jbt@s#IH4oEw}6d@Cl3k_rX@YH)5>gQ zE@pj9w!^XMmwfDj$&nwwAaJ{UTg4x5B9!hfA{v{!Xg_r_e=lI<*51OZ+b?c1o<3^% z{P_f#LcAm4w5J0EpH;K$HE9LuFyNB2_<0WD+lfE3QYGFE@ znmO?%7Z^DbSSz%2f|z%@_{OMkc4{}lHFIP7S> zOdX7D7g+(AoF|RIe+y%_+~6LiMfmy|?$@Cr_W!U&<^T|KPnx6rMa_^oq6`P*H&o0N zhL_1_3m5|7b>ZM38y6e4n^OV$;19V86}f%=<(#;P$@W6e$D7}&63lZGAHTimjCjWN zEzEM~iNutHJKDtjiD7h(f()Mwf6LB9bd5LrYsF?KH!$9wh~>6{p*&xis#x@313Ph< za1^6Hq{piOF+>voXmjPO6*SiibqX$(b0^xQCic@StVqpJZJFWs)Z(ZLac_W(bml-J z(d9T3;(`?7*22vONgAzR8=XJ77(jl9!My`_17Zs|y6`TQC5N<5l?Dj1NfG7SN9#Ln>C+Y>Omc1XIgj;)o?i#3LONljyi7 zq&KcJSG91YN}6*BiLT4|G{HGYUJ>oEE4mFP4p?f#yF5=X_$=pXtW5*jo#zuR{8Dky zAd<2nW>e}02*-AJMlOxN`ww~BC0S8v64zKZtz13}NiM>L{)4zY;I!+CCaK*#-M6za zb}YFv4`pWat8-8>5GgBGlj3*ZyO~$6KPiMxQM`v)_9Py^ofk@Ejq5i-_bn{xz47u< zYVM9#p*?Q55Z8CpsvbT>_9vu)A$Z%A%VT-ie{Ov~LE&O&FNmFMMD?+6KK`h(fOEU7vc!IGR}6n(o!uSz;L05_r*A z0RYu&j=xc_v52vJ0?4A(V>K*YyzQ!40P@TeKyL%Q?s}_nsT_mi#dDsWh=o6GzPD-D zGWc_jkXyjjvnB_>4_v_`cTDatqt)S@R<>!g6cnSL3dfLcuSx#=eaA3&q zigmU^ysLfX7`kpBJ|JTseG!Yj!>%c4&HAV1GOr{@8O3|oK+Q-niF0nMu)_~pMJc${F3Zx`hHmWY+4cXq{iw}02~`Z8ye zo0V>=%s4|`{NZ4F20nu7^kOo1lr>Fcxn7oE_H69m9?$iyH}MXfb46#v zSk1-Lf#iz5SpYLY%)g_2gKdTb&Fc{s{|RcCRd%Ghu#Ska>Y&E{AO=uL+xOWLnSDEt z<&zDA#ExRPW{|&aG%UJUg!Yj*ttqbgaJ2(232CL=_47Iny!h}wtL(jEDpWe<*A&tv zuVPKKMYZ%#06y;qE;Z!Ct*F0 z0uUXTWvAWCJ!}r^uz25#oE!}*5&r>uraN@b{sGH~)?sKxMjG`05qib_FA~D2UT3;g zz5e>p$dH1&f#oxw)wbMY;sTTM+hjOa=YS@gSMYD@N1oK{L8vnoLUMAisN(Crtr!4| zXk_WSrGQWEi)^QP=;zb5gDGuI?1O`p67BPwhMJmW`_=X&ttvlnFY_%Nrn=r@-KqL7`e({)^Kr@DzCtcJ6)s5oDQKpWdQoLntYwl{g7#Nobi&K0 zER}c$Y6;`F>=QFq4&8tJbcl2XvU`^7ImCu!qD>pjfhJo-E}yHY z*8NN8VW$XzRcGR(01TGe3xT)qz&Y^2w+yoZuyPHAjz)KgwvMW1`cMai;3Xu^s5hX1 zlMx-^;}_(<-MFuZfC3B&J6*$y=?#d6kf(wEy>Qej?GVf40nx>i@=97K<34!USGO*D zlHsPod9yaqS4xxb&Y!kPFyHae>+qDJbdRb3WBtHtkX^_??sw|FEiY}6>SRP2=!MPR 
zpcBo$XuQ>Gy^jgfta_mK9H=+yDe_>Sl57O^1p~j1#Y1#RE!nsOFp0f;Xdoo|IXscc z-k8)Y?1f!~1KH*A5nbILBQ@HYmd8h~|6Ntx3wqL`dZSX0xD@CZvVn(~FNZ>y8YH6VeM{US_1(m_>(3`G{l*0I(1DT@vH4=((=Qr4FN*4Sm=_g9Hw(0tj0)pk_(uz{42kZ=q%BvG zECrXxZ_*bKg7Aigrs3z#@PsEO2O1-TO%s{Mfh_;T(l5S3_yTIGHDh4XzXk)r#|@3# z*bS0^T#o62=)^hxW&$g!X7MXZONuona@WwxU5%?gvR=FJDYJ`mHMGjQiimC4z zSl>R6z>)-#+53kjaw2(zuf)*IF=h}u=+U{B{I^ovRj4}cP2sEJ25H` zz0<*Zj;P7d5EZ^JS+s+viC8^JH2<8*Ux4X(+NyJy(5dbh69nswjen#GJ4LqulBb0; zkMH}X=H0qp3=*|O5mNFT@j3$No>`A{F(rwB+*G}FJ9VYT)lreA5wffm?#d6a%m>!M zpr2k4>a9x#oCtzHps{7#607eIqr9MBHHWC`-9u#d77*UGYwgW8hjOHlwPI#%u1x!v_f^v;ddGEG&Rw6A+kuoRlNB#f!h&P=IZaJ;C|J^S>J3KE4av9G@*Fa*PWYdT?^-Tl! zZ7w)YJa;>ZO`ntUHuehNQkJy+2VXUGqxxZKt7+U!-u{U|9C40C~&&3Z4+LA9VlwMma2V8ac*X|b&-H7q{|yhZvwF14SAMg zhzbLHSp>gch)<{@HI1+-)=9K9Z;+YjaW5%}J80SA!@svwQ!ywu2xP01d#2LtNHg2* zg^_3ZT8mi5ntdvxCxJbQmuZ8*OCGIBs$5&M+=bOf*HH5F0-w`>mPn9Mkqzb7ndM=u zXn4V1I)8Gp%-O=a6*f|QrEkSktvyE2#9XoPjF{ccO-*W4n~v{xMtg<|*)|{31hMi6 z+8}MJ#w#WkwRNh5vXQhKdX=-|x>wx1g`H=8jjI5tq%wgC9&H%KkiK1P|AU*5z-Np8 zob1$6*woo%ol`rMNUemryBZx%iW$&yAO7xBv&pVXf5>`$Rg*ek@6~DUsMGgOY~%;m z-$?kL+6@s9zS`mAV&U_8A4k!6njBE9Opuyzv<^6yu++)-G09&~>8$d@{b$1R^B?KR zVt0FmP?cCRNP(VJ&eoz@qB?6#9T~9(Ndvm?;1KU9$-2#sv^7+fYX1Axj0~a1li9Ym z#B5d?V|lQeBsz2j%6t<7+yItS)b0ldP7aC%MhF~%IJ8zdy?N1P4NqeF5AC#G>`Xbr z;Fug=KNu_71ng|smNP!#kw;$2F%Y035R(tR^DLAnbtOUKCp-lKc-uCtuW6nF{JVWo z`Dx-W-)G3Yt)&%8UT+zV7Jx%YH>})!_6H2;Ow*pAN8+}8)Vk;WcUGlXZR@?G1#wcJ zv|;xKAj&Gp5zIlRvg_kN&-OXPtWC(<4%&iorTP5`dnkH9+ICavy}#_C>;uwkbuCaL zxl?}Gc_X&MBT^KUWq+8#EkUJ%Fx$;B{mf8k7xwrTh`&cR3C~t{9dDTMo^ABA%DsAr z!-L}*Ux}}|N1cc$jF)OQ#jIBO_z1M?nfi#`az3tLNS)wp$5D0pOn%5+yDu7+d*ee> zk?=?Vv;+7Htl{+3v1!TFOrG>so?qsHOGf37DqA6BFNRh)OzXx^SFN8mN!EqB=kIg_ zc2?xHNgr4(`Bd|_ZGk>Bxz+Hc+VMT@^^`nGAxev5yTx=;$H=nWc_YUv3m1-;=DFOi zl#uAEYmwOo!jVEJo-Q3_ABa-dnt3X~8>IVKgYt!SsGmkE1kuv$m{4&*R~PmTDcfV2 zefpC&>c&pnTWR4uBiojPnEca+g)`4PSd~iK2?XMB($yg|61xvK7W{Vo=R+1e$n}@) zB?iY(q91n(Xx7h$bOCF1)c81pDh(l;uRd%>@iHGP78?RN56Bk1y<3CXrY2sna@Ta? 
z50l5!zE+J^ebLlCdmq{aO^5||W&ebjQOC;fQDIVANrLj}f`@sMP4EDr$in~LJ3|7~ zyCn5?Ryf}i7tKo9Tm)$T7&R^Zut{%sYRmebjK1HF13&XTzV-M3itPbypV?u{x(@Tn z9L>UA9|A`BHdMhgb=L&uv>h9e$*{ewb3fynDxoc%hBa~&aeH^9C#)~vi;;cAbxO*< z6@^H=-jT;(r+46$Nzub;?T7!n2c3*m}d#<@C8}=b;P6*T;rbP zGl(glm6!AUe!p1lu#csbXaHw|Zx5qk;F1_11-YQnxI31~6tYtjb0Qo2It&tY)`D(8qaCFlVB(b*tQ5_dd4Jm)Ga-Kh|3VMC)6yoxB(wYZxjHGy10SJ3;<@`Vm=%#LjeI3XQ>=Hl~Kl^91 zF-G-bAcpO1;%T>+o{(~`vZXguRhz3FB=YqxtBB=(qD4>2a${I0tIFcO-kW2;5>|Qe zC}L+Hw=RmKkb36S$41Ynitum_$C5omO-@B6{#e68Max@4Yr0N0#mwC_W>!{GM2iqt z%_A%(@e*`aUEU8VBGPDTHanP>RPj6hwr>V6JFb9%=3BI@n2^x=q>%PV2=IEoJbn~Y zABX{`Qr;piCk6(F9S}90VBa4Jorof`VT}Hp4hyjE^@go;JR9K)K-9LG&sh7d=Jg9{ z063!M#PTCo%*IctL)ll2XVUL!{0hFCXs0xgoU~EenwkweU+C&QBOIk(kwoum;Ov$> zXTnnN%`V@iFDp`j-(O?aG81EfAb3`JvPxBO0xNct$k9>`d1wt7OnLQAgH`@NU_fPw z++P?r0T(MEp&-j?yOCbibfa|vTCceqb%mTB@Ll!VAN`O^nB`Rg;o=>AWGzBuUET0J zo%_J<&3^D}>cePiap6>Hl3o|pUl)>L0n%kPn_^xH=9CW1{nY*^aSdtuHuk@==9aX= zQu%-v0*0vfGPguv@Ew$Km@QaVK4uuujf`RCBIT60S}8h2i41cxg;n+2jbh%Ueoo=0 z@FgysVEg1tSoH%VTsSQnH1ryw4tDVQk)4F;hp$^AGpWIm(Jcz7Sr@BRYUj>I*Vm=r z;+<)$$hg^|P$~hGbatRuSs)rt^8u-R)Ov1-B=*=!iPZ{{7_Y8L=oGF5HoEae4#p4QO)f>|b|K`ElMt$FOU$sDM7vm?mCz@- zYOs|$B_$vGf+HxixoU6Mc5BTw#iXPas&tbJyY+3Y2NnQRM$@AwU3%$>Q%k5dvUU_; z_&J1(;P?vx=cJcTN)Ni>KFi9J6ewaoBmCv~{r$aohFSUD_XbK|4f;tY?Nw)$n@?cu|4F+mDvZRLstN~T zFtTJ32YdEoK^&Qjb52FNKy-;r_}gB0Y%PLhHh!~{`yJoQz=mgu9?8*LvSIUVNrPN5 z4=It_R!0Gr8ySTW5}3t)6@faz>&o2RsfLJBKqsV`$f?u;?*#C(y@lC=1HZ1QxDe^BEOhUOsZhp(D~0yA9cMV;XJDFi?d<;&eUD~Z;sTZS}f-TLd- z`$ggiZE>e12|M#3?_2Re6_a|qqg zN^o=@qE&9m{a1%=I1|kxdaHl$j9#I?pMdR@8!(Mg9sT|y5BDObc>d2KgdMfXvWIwTpIM%AOvD! 
z+pesT7VDkQyJ-)}SGAR%fWiL|AmUDI+*>m6G^zyFD91NOUZT1h10LN)qtWk;HF;np zz7D`LfQY$D|I~?!UfEWL@)o$2F6F|}P8l_7)djm-ne)0JjInk+5v%HKtV0q^L1S{7 zM3Q@>n#mFQG4*Oi6(TCS(Q*F;_*^R5khMYARk9^9cQqtxtlkVRk=<44y$LGbrk(;UO z)}ElNUbccK01`TIE?;#iRRX^@O%+oCfyRs4rezonnYM3x3o04N=QrP`@dGe!qiI96 z=5Ghah#=Ziwct#iIRnV!y^8G6bWEqa3ZYct)8zf=wSEmcFNh31VhmaH4!_qt3f$S3ZQ z_fw|=X5{=DQWJQ*c_=R-kiq^p;T zw>Ozrs)MnhurUs;T?+EM`K~DJdcFk9NQNU2QmPReq##l&-P4blRRLY&Xx}dH>r}N_ z%g)|h6^8c>?4)YPAXbK~@T}M&$(lg6w&^2M4{4r}qJncZFv68`&V>E+V}yssVTq}V zm+y0-*JN0Mp5%<+;6ot~CWNd=-)-#%aqD%&bcwkDO`(PD#j!y;~8Z!y=~<|3g68}XS+yG%ytI*xX%hPb^~;1>6HkWjS}22(9NSz zg-QVNRZEJ+lR?>^>s_me8PFB;v@SLmq4ArL3^1)X1%xNh6Ds|30WG?*y@nfERtiKd zhhM(*19IH0QI$+pl!X^oa9msmtn0)o0b6uF&n(zlu%O*r`#u}Tz?<$i?e@T<^6yu8 zdno}UqM21c#!dr7j?I95E6W-+ zo(dQ=O3yTp@5-2)AM~gjDqt+fcxfj8^#7^UzX@T_XNX>;g*kbB+v*)EDn@+*Ia`qm zXbgE6*zE3woGBETMM@>ii}-vny>Sk*7xGM;@wqj@XhzySZGj+s5u| ze%9z(d#dx9lyBK8Kdm#N5@B2zRRWRtTR)en*(1d!H$sZhN}4gcgR^A@Se*kjS%JkH z@WX+7B8;xP)U_U4tdV1lBV2?(KI1sX>tix9hpb2)HYuoe_6bKb0j8RN^YNA98af;D z1_I`vOA4>vPmFpREC1qM8YX$}j}drY3E<{$5W`M+Rsf5aJmNzutIyZ!%K+GHQ% z97O3Z3k>kw-u@e;-Lc35u9JQhqU!{s^3_MI3e|4T`lN1L@%l0{Az=ROIl!GC{!K_$kxy@0(U0|0GaL!eE#Mz9tZ5#BF8Yf0rpuHHl4y zYi}D(5bBbxAMHitfkDQI;hOSF&qOJJf}wn>S4OL13Wg+bO;E72`B#U*BS)vU&^05V zhNmJiRshpPl!rwU-Z!b23lTF?ZhlS(n@`YHj5lKbiW$AWReI3qwt!NHn3a5?EUU6E zhGVZ`!^#8{Pr^Dlf8dRn;LxyN$h;V)&!NRLHqM$>oMmtHRCyE$$|DMgN5hxEEbJ;F zl}h(De^@OAdZ;@e+bEdz&765`xZb&;!`z_l?Md6vr;2OkZ5CwT+_t6I zoG#Pjufdo@g!o??qM3>NX(IKvc_)ErR=}u4CR?p|2}eUAqg$n_3)G?~xeRKcyTz z4REBgi)udxi>4AWHtVQs*Q9q7NVzF1H~@yPu~dSc0*-@Odo z?4}v-Xb$vgSUkmVk83GskEjOU+$B;+rd;>rdBRQrN1x~b8vsjh@dH?s#vddKyJ=t% z4Mb-)Ty&|J#c0&kY;XhNCfw7WQ2<9kaBIz8;@OX$y@bc3ufj|xPrW9d-_QVdQ!cxi z#-3mVM_Xu$T_c&3$T9R>$y-uhU!?DC@kB=}air@gC-+o%Q-JFoY{<+jD=1Uoufs0> zT1DX(epqkob)U|Pc#<&S@^wF!O@)QJs&}lK~<3oZQ72}LXM;KX74#u&TV6KOwY2rlW-mb<2wI|E%+pw7{k*|^Njx&Elm`B;Y$Z>sq zD945mB!%}dg6wz~VT%7#3}7j5QUT>|(^w@dNQt177FW*vcBT*m;5c$*N-%!$`eer* zO&c{M38_&}04aA8fu+~gP~&sM5}{AsHP;|IqH;KImD&nLSb|DwM@*$=UpAx2z<{q> 
zO+;+;lLQ`ro4_jEuH=}m0c+jTrAh8nJD_JL1Sr%*_pUHiY61j$27 zM^h8S7_xWiTHk_x~ku>$jY=L;m*Mph#ltLpKUN{v{f zW=rlKbv63hJ5C>ci=;g^=U8oAa*rdFFa3A{d+u6*%F^9>0A{A0gOhz4lm zGDjDYqW*7bL$M6A%kks6vIZz_b?l9HXX*$dV}R*c0BM&Xu7C%ooi%|^Qe8uJZiMv>r2|h6?9ZS=OON-k|q5+NhXdr$VO(mYLy|PiNQ{2PIcQ@}su^ zw-iXzHyVn45cDj2>=e=-MR<<&7a9|)rc9^;7-bwzRk>^trmL>c4w9{{x(Vi$)XeA* zna$~Jxb!xUKi3*P`v2MWitT_L`-6%SSrwdor0S}17YXa%VieR8)la04$H2X$yYn2* z0<^+wFba@mkbipEHa{ptj6!WgmITQz%Bb{N%udSv7EvO-wVKp+BPZE*qZ26m;s}ZH zV>bUj43TV8x{pT|w7@X|k>3-!TkuzmmjcfR`@J>a-@-6a20~e%6ypd@s400Tnu6{DaS&=#d(Vk=b6~u9(B4V7PU^dZ-zmP+?%~LYLQo z|F}W(z9jg-*8&Vvc_x_&>>&CK%ZcU@jm8zY3&LZBtCw$n&5ia;MbQ@`fnv%;gCyv%USS64|Dz>nuy&l_S(U=3?mYVWk;i7eMJy`P;On`5 z8ukIc5o{Jx{(bK}oXWor>u`IiAT^S}bk|q9fbNA!lM>Pk%Z~OKC2`A7gU6yn2g*^z zCEI-Jg~!~S40z}EaH?b;rO4!yhb8raH+Ocmz~i}sDK`ri)uuO&Y$?J-Kqox;3SCuT zxot&23}l?gj**NSYR*KN1DA)n8q_PE&E$bjOsxl~hkooLl0)$p=(>B>@GN2N$mv4p z%*c?(R$hPGK)l-TYYu4{QyY7}%Woc`<jNr>19Y6*a_K!w%t`{OxBOC@>W`s!m=8=Xq~%q9sXZZX?>3pVCCT^6O+}59?r< zgZL6G<)rG2%?F*A8^XKjE-y`);q;}zEVLKl^4j#mj3#^28Jbk`%gE86$uKzdS7Bb> zhyvf_m}T(%w|Uo}b>;GAC=q5M9)}&=w9;+U(3Ufn`)GIQjR#-)%ua+D1ifdqg-%s$ zqosc5CC>t%UkxCo_kZ#PgAH~O}st- zr(cdHRhegy@oYb$o?Bv06r*Yt4-ToUzD5dl{4jhW^rm0?>>&1l{x6FgjDjTTTRsDO>^&hL?kjauSJ%ckJIKFA#3>gl)a!pZay8Db)0%P~lnWNl?2e-r7z{XL?m8`~+98`xvX=CiH$PfI%fE zx3Zj8F&biQE%9-|-kz;dI*g2W+3+THyS3_dn}M`5LjVp%@W z%*vY3RkzeZ+wXIKwScxlZNRVKC_qOFy$W9H$$DE|>?Md;`H1>FWuqpbJ8B^F|4gz? 
zjHU6I#O$R}#80K1_Z6-U>(if7T<-73{tWO~KD#FA%n|sJw!wi*P!xokk+-Z=V=&Wi z4L2?g#s*SE;`3ywK0_)lzql-EOi=P`(6Z0KXT7At2DDHS&Y*~23|vIv@O!Z8Nc~ru zJj_6v>zaE4j{=Q}3FU7st&98!^tveuN)f|J@XfPVNUSaO8s){&Dwu)G%L7QrD(YP< zqthUZ20Oap&c}yeFQ&rgZIABLw}Yq7?rP9hb|{GVJ?;HRB^di3l*`_ALYX(7h_HkV zR^Z%}&#hUmr${?it3*MU1GzNVn=sDjm_#bheFx{&aeE*y02Ot}$!%@^rZePZhpOS8 z)-)-&o1Aj(13`YgvTFizT;~2U&fY|q8G$f#n^nsjZBo(dfu8(nI5m?Wsa@wNf%hc(!0Pcvj32&Nbr|x*A6R5x1ItvYA*9UrUn%@V6`Yf^DvznRE zG0aC=)`N0ewkBl57g52qtxF)7d_p1q8Z4wT$OOz}wMK~hx$Os>-*K>y=W5l5`NlA+ zSeA4AbqI}hyOzCE@DkIpfFX94r3m-gVxmWAXM`OMV=YiFC!7amF)Q`et*T>o*kX^+ zH!$^8YK2Dv3>RF8!ZszUZLpF?lY2mvW?lJd!w0G z+A?rCa&T^jd4Q&;>Fm=PdOX~!)=mH`+XH@p;Lj8H#n?KL?*3~xt>0koDV;>^nQrd( z>$!LQx9Q~8P0nK!yAz|FDrJf*?v(Tx%W(LBK+R$Ul3^KR#R8bhJHHme8Uv!p__Rj1 zNYrc;VyNFEA3k#%d2=A_XlRZ|l#~cXQ|6ZOms=*Xx8cqa3!O@DZVKm)SbuPoZYCL+L%8YHV*7vNQh9^_~@fSy`1 zq*uJq-=z*&jGzH4QY1#vjaDFtq(>yPGlSw2X^Rr^IWu2W#$}A0ax1|%lksJ&p`&8Z=Zxt zqS;7u6lIav5p^JBVrn(MMX(5$Sa-c9Lbpr~m8s4IQpeojsO&qU2D$8^Cj1&wT$n^4 zv%Aq35*Hj-7%7E0v_J3lQ?X_QXI+0ftN4aTMVgwoMp^WXxsEdrk+WqCqsh%z3s|Hx z>O-iz2rXksaB0L705*nK!+UxtP8{kS1+Y?K{5hCPvP=zKXIo*4FR=3b_WdbqWA@_( zq{$U1Ye=kRZeSdIdcoeVW{e`Zrus(&77S~&UNNck*rx&V-d$u^6gH$*G+K*kdHh^9 ziKW&3byi21VDNs~>`Zn@6ez*))^*9tn1d_)`zo{51Ue#2_Y(AZ1i(ES!UCxF=3&^) z=BLaU-8((Pxn?+lw$$nds;Y`i4Y0{K-o=#Ue*Ys5!@>zKlJ+ke3{FM>gY4jR2D{*G zJ&xG8=3vbkeIEY2<^+PV~ZpxIve5e-s>@xq+; zaB&-}^X2}RM<`}%JfVD?V#;79B`OH3NIC9DnybdDEuO+}vEN1Os8t+#9S5fgg5hSi zk5zkvhqJ=Y9Nm`EU`JSrN=Em5ahDho9h>z|D*RvYD?DwD^~QTDS*-;vS3h_+^41t1 zh<)AbtcUmH!rf6VTgNPSFlRAIzkqD+z$VYsz3!Um9Ow$=u@Mw0IeQ~;~eJEAi@j|nN`+_m&J zG)F?@v`vZu3lvVv5^?;@{YdmeLmiqb^J_sr>yv5Q982=C`Q$`lP`2DRD1*smEQ##o zoVpaj5L*CnfYUt_ZU-n{8_cCPkt{@PlW91@N8Ox;alEs4xvx9EUW)i5KsYX5a|hsg zm%~|Dff+#_-jd}c4cj|bbu66QPO`bh3MeGrK& zSEEwx+$*GY()i$Muj87RU!qULu%_|(b35M2|)5oD3RJh_icW6MB z{KNoZ_nSQ)K#*)HkaXr=Yx(4*X>16$mp|H#G+kSz1z3w@v-)4l_IUtq*S8@TMaAaI zfddU?T(8A4dV(#^Y0%mdYglAhckFVpNS~Hoi zBX{`=(Tnh@FR6&0-XyrR=@ly(z-1$40?Sh|4WYPq+WmnarBxsEoqS3z`0?z|8cSn# 
z2*W0_B_iBjE~+q%qdi`5f+c-sUxSSHn}ML%69Z_$fwJkFhHLiShqv-_7Sl<)GlR$4 zSUWjC_#u2j->OStTxUrzvx0qpZ3Rq{8-@Hb`9V00DN(QRW#Wp&bPPmwxzHYpgwu>!Ka>3sD7pd;7^d?Z7!Fdlug8vu|l zSM_e<>Af)o`czn1HeqF)UMmvzmPX;9bnt|E_5PBNVLlo(AVP1Ri_ItIeZV@k$G}sw5YAqYS?80YZYRy`&4Ka^zHd zQI#2^6`P| zA2XW*C4;-T!OA35^jWc zDj*Vr`y0E*q3X6i8OYn+a7j<8=?~Mr?=^P4Z&Y4mt{K}qNlsKRf&TO&D)W(EyDRez zcA%$nFx~RuIdmP90^|?FPPNj;%4tS7BZ505Njv_aio*O3QqVZ6WhytQz*hRl0Lcwj z=$uuulF%kOX>#(5U}CDFbc{xk@R>rFIOYI>%{>C6A3p1>eh(*=O>N`4WLt+2RW?*q zyT@#CZuR*Ur6rw-m-7Jdb+;gK9*YHEN-el(A#jO8pyy?FztIG0y?ri4im z>l1i~`H}Tl_NT@JMU}68p$V@205dBW@NFP`sN1Xuw^w@kYMLn5p1vuFFr6PBU<@SS zv_2}2m>ZF?naW;%_UzN7VpuY09q#-o9wNenP&jcx>h@Z z*O=Zqqp>+?2Z+1aSQdxY;};oF`+`yc?;ou zH(ggNq<*cvo3uy>bpv4)$na7gE4Xqz343r(NguCjRx{y=R-76;d$>|6LF$*{w}Pcx zg?0l>7;=E0!>o-cat2RO)%h-Tt3;8{u;9Q{e**)rAJ2kJEXmR_D@^KGunLW3h zKBaJbY>JT5MKSrz@;au3gg3f7Td5lTK5X|F`=;-NZ2YeUfa_Cb2)}~gx}v{xQ2s6v zC3~x*aEzm$t=S3=-Brn=lMKHI5 zeOG-V0U?j5HT3+=N?=!pBS33wof92eEXIVIb*0cbcO6G)Xz`dkd>tE1^;LEhZX{c8 zkS&lQs3G1LGUgR8J@a-!LEY4Fr4@(JOrebhv80=&Z34}GNr8NR&@c*>Yco+?f@PM; z<~81B;aX$)u{zYW1Q`6x7nDDjpGe2m2dl!6JfsOP>++(H-mbN1obkp4)0 z#^z7yv6#sr4^7v_-HP%b!RxywuvwNk2>%ecbV18i>`rP6Byg1dkE4y+`FMx|Af7nv zmD2%q)!(={NmlZQJ1SDtX^-cC&Bt(uIS8Qs-(@9D*Sk|%rV5rq>4puN;Lfo+d3So&^!VY(H?3`eNTll`^`*#fPIf$h zzjdNg`_w@WS~0=!J=8GI)Vw_jh1GitVyk*uUuoaSK8v)>L#V5y(`+f34ceb^F^aSU zjnT#{%gmO4l-?1_@ZC7!ez_RGLo~2?;YvC~+=ZgNc6cGXsHhA$tC>7TWV}s1(>J^f zXXK;_q!O}QbMLYQylitAPPm8Zb3Nldk9~CbKWYBf(8wE;z z@^q;djasQ>EG3pQj=UGF;-~U=`C$hUqq4BEsgsS7Oc^xg4?Ls9i>+jrK=V7UmwYHv za>}8%3T%h?W>d#{^vPpSf7tv)sRc=sz~@4-e81l2vocG+MsmhkRDVqi&~umki{G6e zl3_O9=opf-zK&?FABD2{zcaSTKH=vZTH;9Bu2_){~<&qFD#92AE&HmS3iD-ShbTifec?$o#{~-K;&>>p|2Ta$H!brus z%;@+d6j%!q`i9Mr+tzWuC&03%MPp1fzB%XF>0Y6=c|&sV-53=R$w=(>E`M#W)M|U) z{Q&HA_}4uj7pH>u5;y_48W^&}m)+4u+gL*2BYV4bs8G!*W&c0cM1zHf#bhZJvu$tr zsXyXbb&!fS+4(WCr_tM-opY*>whb`+l4fIz+lCxba-R%h?~3a%HrRPFp^qtYaP*y5 z1RinK=^=^B0+YMDDv4P-e&_#k${ar$tu#o^bCd?o(z`Y2R6Of5Ov>>{ti9!WPEF%T 
zMgfngA{pt0plVQI*eHON-#p!H?sCwPl=D<2cF`-SHHa}FoF1XE2Hx~`Js9l)a6h^8 zkc7MuX=tHl_SG5i?SzlT=Rj$eCGAk57Gpt&h0DzgKM3pXmC??@8#^Za**tESzG!#5 z1#=neLhk8LKd1Z)0KVNum^PTo^2!7v3!U`b@XLa0>sTUDNF;#>GxJABb8j`T99a3POAuYl@4_HVv9hy&_j{mAXo zjL$K(t9Tkr*HGa1YBA8_X?}^yS{in_^AZkvs6yA*F8>tKuUP= z#V0xFSU2o^hb$^ere@tR=JE*Ke3uNlgNT-w3eht8!M)i625KOJ!6osoG^|^$JSOro zZfUHyKCu-g&ItliOqLT<&jaHXd0PuBA98{*nP>kOfSLX-C-Y`%%=0>NBUf}&=Sv1A zM~}Q#{RS^WK@@L;UPPxstLBGq9Y0uxEGW6mItLAyqNaw>Xs#Rt*Kwn@+4rGgDr}2$ z(tkPO5thioS}L+9xR&Ikf}Zh}TgCl0sr7~=_bU#?b@Hli_F%bi@jRz|-XqZ#sduC| zM^)W4z8C&%9Hk5;Lu=(I3MZy*_6Bp3ba9tOai|52ki zDT=0@6$=o;CO`|GfFKmStIoyI^MY?ahuP0CAUz^-Ttd3+pi`Ebbc1Xh7U5=_O+DOl zehIN2GGJkT{gcXXTH^Mfuf!i2LpX>i53{(Kc+tKX2?}`LuuDM|E6P7k%w<}l6aPO= zqz|(7oF|ORPbxov1YP;&ZBcCjINX5#5g=dyxcOLaVrCSEPE9;Z&PFyEC#)c5fNB9{ zv%!0si;Omkl71JH1s$Q;o4iy1*9mf|o3PH5qO7(Yp}R84F^ag!lxjQZ7gASCU_KAE zR>96-^NbT4a5*F#6ybRmrCww%5!;x#mO(&vai{xie%~w9Wg9aX%F2|bUz^A3W^@Qm zw6uzER&mY`l(6%)YGX7boW0#LX+^K6D=g-0Dd2v`Fz%(A<`|N;(3Mwe;IEy0(*-v` zsB=qGj@jM6oPtcw+O~GUS$6-R8-=vfoReUF1X2y^63Ke-%iZw*q*(A3)6(-pmX=G_ zpEU1DZG(Yyxa~hs-$9T~%`@nE@$`e{)|C_K?VB;49N=$dV&`lqP#6>NS}lpyPgm(n z*Nxfz%flM*)35D;pd$F!UNxEnO#9WA1Ib|g{Ai1(;Rtyg~ zEZb(Jp9GufrN?&8SEQ3%f^JOjj(x`BxjrbBDG;}TNBUYR#smo>ok4vV6<9@)@Rge1L57+c+Bf4x{f zQYyfEvDcxIuB{(<4r9cYbUaA#A&{@%y$*W6<=_!5EWU>-U#=4_>w==Tks@^QIQ^1+ zz<8K#G8&>;zx3Xuox~_8TE+>gj!@l-ry$bJ-ct2o|EN^{)8y6)<-FL?Iphx6)__SZ zsSFs?*=-@DVn{px6l5Fn2Nlh((S2_12Kr9S>@aPxC#@en`l^+Yu*VTNb;U;~>0^x6 zwx+Jr`*CUekUC%SC6wEg<^@_G{xII7xug^Dsxde|d0kwHq=(pI)MOMY%?S9pkjXW1 z#i}!dN^rp|dii#&rYDFT=lc$PWcwn>Fp;|A!ojH6C+nZid*+jSEb(2vIgRlKSL)S07f@bVS!l!*7;*HxsxGo~j@fT3Z9^>mV;w6Yl zzf%*Ivpx#+ngoC0);eSF6w08H`3I4$Y2W*Z2g4NYxA&}QTq|Hnj(pq?p6-A3(Tk$T zTCF6zVRmVq^UOieyThws)(aU3TEWBuuXAM%ABA@TslEaD)%e_1F?eu7)owB1X=@KV zLyOVN0y14q)q?{DsDkj$Wo9p+pBEhui{j60rM~$l;N`Y6{TOIV#Nv+}$A+5Sr2`qL zGOa}e7JjChP+D1l#hMI#0>Q&J!ZUsrR-`WVt1f*K>DXLUKWY^VLs-C$bHoCCqij`@ zIIOC_!AWZy|9(?CF#6EwhnskmqR%R+ba`&*35Y)+mYwiGmz~wfO}sSNaqu{2;-!o* 
zG`Gub3lNd&Fa2&+4T6#SF$&FBqf~y*X4ENXyp6hyh$2PJ#(Mg^K8e@5a=1w%@4R`q zH*u>49YP|$`{RmD_VBhbTj(rQ#(K-18a9oUSrPCk|BE-z@nN_;cnm$E)U&+OWNPKU#*8t_u^n4eOcB z9gl4H`giKe>2{P8(QRlfyL<_g^Zl3;S1&MNiJlk#@0pepcZ}RvR{Gi9!~7F#OKD|! zRQlMIn>s;lBcb+_>LA&18({x#)I%EizAFUEK7Qi9pgY#;75p>xnEr*_MY@f{(c1oO z_=n7i{jgEIwlj$jDO`3%cZYrn4MPT7H(=Cbl|-M1J5}xvsOv4dhcq3x7CSVW8XLCH z34eO#u}JJ<7^_SSF5c91NZ-~@V6-Cuz+q08sr(K2iV{v^NjML3-sLTXo67l;wkRFk z*Y?F9M|<31U4mQMzu;I!6|XZMq>9PDZ+oo{rP(_5*pbbkZr9eS6c|o!nD~&ey#t*y z2LG>2`s<>$#7z}t`+9aGj@D8_i_9&mj(av7VEKhvL6T04heR0oGj}#TT?=Q|K&MQX zO&I!}E(Q8_pqnJaaMqwUQVeP!Zs2RY9flXjSN$%0qP8xu@~Hd$h$*-_61p@1Ie_mv zne*h{1|n9d`?@70QO-7W6C1j45CV8TB)-5`F(S-{$rIpE{4;7;kTVJ&r_(Ji-KAo| z;O+?$3aPW!Rtu<^{fgnTSLKtZ@qJJq?`MQa^tcM#+9bU`FTgllC4x#X6b$Lb#{!v_ zb(iEn>rv7E{X$6g03@2h2RNz6`owtZmAhNa&Qs?ij~}DoY!eu%7k4x;iccNppzsW3 zir)1GA7O6A9=S;opC!Rl9F>e+G0kT%{A?drW1 zhem(Q-MfSs{PUT5a@mFJt>XJBFy}?J3`wUt{0uF*>O?`f#$aL}=isSHeCy`vFbvF8 zd_ntl+hz|5!G`^x9{kq5g>XD+!L$pviT@9952Oq zul>Zit32JwkjGD2upfK6b002jWzw9wmYxoiz-;+n2;^XVd3LvAn;2ggzrb9$<&i>K z1y8C?xqnGk4=a!l_P07MGzKhONt}^i7ipBXT3S*$kEs3T1G-*!?#Mz+DOhp2ieM_k zF!0M_QCgjQi@BO9fwXA0jt3wnKulWR_WZV=(xE}_yjv*t{XT>Sf) z{SKSHV8wxD3;Lt&Z&=sM6vP(wd}4q$C9AUKc+}Vi4TN|U^o=!n0#t`X>Fud<0|E)7 zj1N1`K%FmKN*ZLN%3)P$Q;~-%*}IUMaem$6FSLD~z`1DC((&k@$J>F-&x$){73WZ5 zHqEqY7fG0@C0A7sW_lh`eMyJ((RI?3&uNDA!s`NE-pM9KKo?wdC}xy2n ztT-1G%np31;UTU&_1GH{9h0|+9#;NnkVBR7WBqB3fadM4Qa0s{3LMI)zkB^4QEBC5 zbWJh;W>1|BB6hqguTB@rta7?OJ(=}KwM)v`)P$Lrrq;l#e0H&jJ2K+Wv0g6eAU3CM z3(Nr5qaIHT5(;eU0(L7vSu^k9p(q^#T?oOfPwKKUF&Aw=pC_+sU^)Tj535%4-Dt?1 zbTbNDhRowA2M{eo!HT;k;E(#CD>ZsET!FPMJ5bQ?uq1njqa-GMWzs+e9X+AbO9fuB z8jo4BE)>K>J}LN?!!RXzM=O93?6K21XTTq5-;PrvVAZzw+R?ee{YS=uKt+uz&~~L! 
zyGwHbp7zveCvqeZV6eGKA02~L<{M~kF9nu*S+lk$k7b9Lu2h)Tc!H)EpO*`rZN&}HUm?4P(yFjomh`!+2**j>M5*@{q`^A<7y-IXS zV4r5mv4*5fnAv3FcW| zXti`mhOE7rZg!`t++aBH^n_NEM)sTm(|cL8juotzEXB=bn@f=6A$H5E-4pAv$qiAR z8@k}&3|$PU!DUT z){LVZ!V7u1MOKRkD7)*5cYVMEY?%uW$oD@9ZSiUutH2*eg6}IL25%|gi%?jA-N!Yk zC8!_P-C4y3+k;{boR1^KY_>XG-*v1Yr^~X2FkK#`WXRIi7BqVhW5~Iz;0#~jh2z$( zd4Kc`ci!}YLRd^;lC{O1cj!cY@hjRJ@~rdj>vpJu-RjQ4QhVOa?mB}n>uDTCzgTlW z{EY3LPyz;Pwt|LQpDA5J^C>nwSwz=5yx z$u|IZWNc}U4s?xu&(VvilhB$>U@Ql%Q$bYBq#cO9ysXjAHgLg6Y!rXMBxiZ&r?d%4?Zm2uCJlVz!{4CRR8#~7bgu~|1#8-M{&LFYvDt!!VmcV z^z|QrSY&WFg7IUXt%7}cMgYU2eF&tQq#;c@Be8v`5pY+c+5Y|(krk;zdCAb#K6b4_ zisl1Rh`_MoL-HtdhMWN1t#}@exeIjTgbu2`;>gJ z1VZP=_yJ)>t})6G>=5xu5<`ziW*6@1u0&#TV-4+(X0CHOR?Xrt|KB-kN}jbfe{r^+ zP9FFSqS6E#f8m`w;6r#4Oc<{z#{`)mVl)gSj8)Mj7tPw(o6p=NNln&FqXX*%ueM!% zi_QAi?Xtz0qZy`%q|Dj-qi`oO(ui?V+;hATqwbFtq12my?J;)}(L@?ClmOB3e2 z;hs}sajSe!MM!#ov(FPgCb^`;t;9!I6h1xCI+;;clM}Ba%~QqqBENXhQ_z)(ItX2) zIc?Crt*04N#a;l7zEYQj?Gx_GkL`@XNvxI&(kf!U_($k6+&>wUN9}CTbm2DUCb<30 z*IGHe#!rHd4DZ??EteHOT1w6COC~qIP*f}Ac^bB)8%3X&LgAUFdVcKS=_hLz(-;(C zCk!BN;)!DS+uy{@hbl<02;13qa&ybX3*(7t%;l&4HIX+gZo1HkZgk{Ai>sw6V}0f@ z5jILs(plQXS%n)XBIhF^9CWzdbQaqyAzO+j?`DV-?z4OPRt>uZH*2!ZHz`~C5gm*57bdklIG~}i#5?h(2j6xHO}~j zTkh(6if0wA2wX$N9;!6}wf87WH+C>ouv_R^b_#6Sdl^thum&9#I)gY)Y7>S*^nY{= z6Xj>QCbyX9gYMZo)|=vEDq%&1t1e(s{ z@)LVmvs&I+^{9UOf|JP9*8k+^@6z#zU8F@6D1a=mJ?o5#V_YTLQ1HrBG$N^>ZG&rL zqzE8hJY5cEhS1QyLlw@UYBx}R_&(Hyi2;r&Zp?LWA^1B$-s{+|gK`!mA_3Qsm!~)0 z9zZOPGa$I)_`str%mZZ;s(mxFY)Psy%z_T?;6H!;dXDe{4YkRQ)}qyGQ*8iM@uuG9 zyQiR$`aOP%AVV@l+sQbmhdFqG?jHriIWM|IkA}i0Vbi^kRhIq#7Ea!fTrxcCkcjJ* zN{%0_H|F62TMl)3XIHjBV~VR7>53*P1zD(GfYovm4J1N9zS6$VA?f&y2ru%Ezr2wS zSiG_^pdXKmZqHr6okKbzJ3$JPeQ;hU5MiJ(;F?kLVY$AFDg@g)U}lPdEV z+6Hatogov&SV0p7*5vh{GsAa*DZi z%&=Ehq{9GHfPDuqcp(s{C#4!5T}xv4Mcc}=Apvov63W7ph{U^QcU+0z9Ad0ER;Gu= zqgYzOQjSh9ad4(Pa6#zlo7e158nTK#E?Mf)Ve<{7oG~yY*>aNAz_ z7$Oy>nS+p>AO;9p;2tkA;9I004y8OJ{Oo1obVq2 z%3fE1ld}iEc0$ykbh3Wz!OK={eb^48m?t6jZ!Gup7?Xx5KVEDqOM2(#i2JEE7A=+? 
z(5M`xITAjEepxCFI`+!VWEA+G^;qC+^F|8pUTcDUP`b?61dy8&l07P^LPu@RCd|%) z4Cvohrn#r$Oslb>x9_lnNI!JApIElUl{>*0V)c79>5s=2Rf^ONjuSTZWb$z8k@J^u zUr*9L#EmM=OhDevG#U5X^vLJqx5Nl(g|Re_#|#`VO`||mSAveZPy83sn9or8{Zt#L zq?q!|V<%K)IJcn>@+h3ftetKkG6i$lTewMrs-Cvl(|X{8yos#Jc#yc5@k?2!s)VCj z_Mkx@1s;8YcMZ&jSivfV!Z)OQ@bE>5GyMx-5oi(uP3?n}|;WR{h zwPJQK*p`*I7|Er)S`H^!Zh|KHABZ`$Pyt@$L=LOe{JbQ>HsE)^=61PBU_jQIdM@Bm zSvmTEURuvryeq(II4J#ItJkz@K%GR*KG~O*J;LbGFR)CTR)OIe3on&HzQd~8Tm>k% zhpnuE&a~!R&E;D9vUbAz@)P&$o}gr{#-}$2O1eQOr4$TiB1vv3g@p%9^P%y&YW7=* z-go2Ip*1`G_@KjE{OW~cltvN^*@o0tv5@!!etznPm88r$<`-Ef>r`44Vf*k=oy+z#Rf~~aJdoUvuo&M<#3#D zGH=Z9&F`@}a20)a{Z12>gz=u&>kn$=;{;b0qh(DcE(oU>Z8yLG&lBnS1wXr`z74u5 zLCFXTrbv*#j*O2C$aM6BtQyX5|8Sf|Mk9F44J&j(x2|zp=+kxUSFOcU9-;4~sC|3s z0g6UxC(YDI&>}+^hAneYJL3VS&!u2TH+o<~{L4zbrK1TBTtb8d4>HG6qG=igJuP$ zkeb9d^4WL_T|4||Dp_7Y(ZDBl&Va#54P`wf`PBu0`Qq?C+#Po^hR~!X&{DGE>Usx{ zV+JAE=zueYiboI@`s~fG5i%_RR#N{Kft|}lw>m*~H)EDvoF$*nkH7d*)RH5`UC;tM zy|&|{^D2FkfA85%6o#?@ZN}^(On7tCzRGv8ev+Upq2=_6@bGBOh%@DAKL|9!9t)1^ zuI^3TPn{d3)^jm(21eeeP#qZm-FN&%aP@jtQ|O`7*xkO)?#=6>_IDu_=IJk)I&Q!| zDV>J5zo_8}!^{Cq(uq?3Q1sf<)U-cB}O>t*4%0ln}Gu6V8g>j&Bon0pF7+ZauhQ_aS;Esu$z zC+W7OUT{6~8^ISf&WJZTFskWm3O#&${j2#2^fjW3g*Ex&Y{Me3>FZ(DEpe|5Mt5Mk z>*1FRj)wH&hssE@sH^Q~_kRcQcRZT7vFyd6Fo6pLMmkimY++|b@88gwH78?Rv?>cQ zcxI=4XlwdC*h`KTdWDDN*4DALx*RZ##i#@FxrR`ldY6An6Ve4hN)AS}nMS?S!J zSXK*3xrCoX^>e9zepHJHfNPnoq1-*F=)>f?*`9A-4;n>{`6b0EKBbdBLdkHj>cB5} zj3EDZLJWBBC>@(-!Wz3dGGOqQD056R@5HjJ6N=vbD>bI2Iy^3#2aOtVhHgEqO5OmB zEpf1@*{1`iZR0ry;f0Q)+0e*cDd1j)aLeGqKK(wkiyoc*I4`xRfvfoX`*uw$YQ=s= zx@5)bp)}Vl8!1~D_|0lFC@;yWbp^!WP$Cj%@0_9zX0}`(Zc$5Tm zE;FZUl|?pgDP+Y2B0j%GF#EFxc1(LgYoDtp$%jVv zJ7>$|dN%J4bDI+KuCXz*&S!E2tvrn)lZlqKq2;D@EAetKAcTri;_(DbXvrp=wdv;Q zp6qGCfXg=}e+HpQh5|J1PmkItp7(d8&}kxhcrPypOP&hB%{4^{?LAw}>97we6>20I zP^efpkA2-c8XC`c@x|+Xs4ghX!52_H5~xH8)u2g>BsH}^Rf3|!QIuabmrngdW9j8h zFv~el=iEGe?dEOWPB0Q|Z;D*7wVHGgO#p4~D#o%7lc2UrfQ&L%Y!xOph8Sjpaq<)w zI(S3gZ|d98o_E6{ovrnguTCnqew4ufDZ8`I&(QIR<~8*Um+$e*-B?3)cd^je`)q0) 
z=Ay&IkkAYy(Y6CfqU3otwG;Q_$V9QHU~F|$1bo;hpGh3Pg;+m%2i8sc@n`x-^+r_-`IVOBwQx4+1CyPCp=w;1BA(0K_3ptQa5mHzQ+HxE?0}ZYMM>DulfOmNqD8aKK z|52aIgM0$A{B8?oV2#<&wQzSuEUI+Kk9m6DSDg`SlK2HXZSLc z??A=DsB%h(o&%{H*_%_X150YMWKedw+~88d5o)a=Bz7LFh8>hr0zCrOMkIO&S*Fvn zq|eSE(GW{GgjId@gokSSJo5U+31lbuENLHB*XM0xR%5iUqc}9`zOOUs$(do1#!bib5ze_P}Nmx1v!^>Xw2^{ zCieXz_1MpDb6k7&lhd9dX6$UeP?+EQR2?H1n+YYND5t}&knA?foUijqEiGi&$?|Vn zJw?y@A*d(qG);q#yodKn{2kv;+>a(9u_DIgK4VHvr5eA2Gvr$gO*Eg=5u zD{~(gQd*epVAkH5KP_aznp~Yt^V(<~k5S2NBfkBU>cdR6Iyo)pmR#CD-Z$r?1s_1q z{)evN0K&0eHp+az#d#*C3KHW;j>XK%aJ$YjzL2BX0*K$j-B+67+7+v2OPt%Ofd|1v z^tDBZU9t~-t+#ax1s5ALLfC&a@(Akgq>No7Op3qQgDq&-?kSkEVHBe#P=7Mw93n+;ETAc_)v;ODTFVACgH?^bTu(`PZ zvN=A#HPX@OioZ+^>(-dkZ>Cw->710%0v)1-1C4)yo-*t7F*sx;4lq<{ z%<_>i$HM@3xfq~@LueY5n-A}osU@S~`9;#R<)t3WP*_vfwGr`zm-4&BW5ydrg9)+R58l=8{;<~KIg{_^U$dL>) zkPT4Czv-ybR4HA8$#?~CZHXAKyCc|;wPaVDzp!erSNu1@0X1R=+BNZEAo+rWc4OHRYON*V5ybGeyn?{xv(~-c`Cz+mw~fe)D#dBE8jT; zj>)94T|H{f+%ebag0pt>olp&Cw_cuSXkQw1Bh?^bI_s|8gM3;B3b6H9WW$5YM(gB4 zCD=RlwlDH_-g=q0x)vpWp+4*Fyk3bR?N@xU?sQvSQFAR*yeTA*CIHn!u!!#1q7Xx~ zy1F$Mye0*_MIAP(ONd7}M1T4cgt0XW4}D<+7HFA6A^r$!_etg}wdO_0!wU$|c6|<* z_}3ev&T$EGr{Em?kkDxiamFMjl!hB_lm$^l3e4=s+&5HyzrX2FaSh?Re=?=SE$WQ} z+L+Zc1d9KTsb-mYrQixQ#+XTV+54b0Cz(H3D6P?X_Abh6i7$V(4tmJuEV)>ikp>CC zN+pl=>5s)PIr@j5h6V7z0;^@vsbm&FH(Kx9xEXK!-H5^XV?SP>;*}Q?Hq;FD$A?5~ zfUh>Vc)4p@;io9*KTr)xljYww--<`%5!A9H06|t(BmFaebo{piBr9q1a@aD4i%)iz zO*|QbGP`WWJM}Yk)*D@=T8=mcpp#IQtA^(}*3d1nDad1$2QrgG7#LO37PCMBc;l9i zFia_#6z!n{sB!plBGzs9ArFfLYa|xM_eU3`yI~O16rT0?Elor)tA4F0CuQI-rE3LV zi)}4KAwm>j7VQxgjUaO_il)UCl_;xG^*~TSLX1j=luhg%qN3IC5Lv%oA4Wp#p@`xM zHj^U83xG_)=O7BL*h=GKUTRtRvE-OLWjhHLZC2{*XHl>K@Pb$}U$>_`#f|-qolrH) zTY6vznP#W~WV>$Xg#LhTXS3Z$AR8cc+RmC_H54-5#V=c^L)K2DUH5~_{+Xkr)I&u8 zV@)f70o7tmxz?#;YxXGNoPtg}#K(?_7)7B7NEHHa@Aeq2W>h^YQII{?esx(meO|D* zT+n*q^QgJ{XhKaS=M%2kuWSxxPhP}U!jz>J2#Ds@m#F?R0R2S6*Lux!uJg4rNsoq@ zJuV?1cYY<$7GO#=3ckIL$~cpCaj+QnkF5j=nAg#o3NiRthv?(%G>&R&mWG5(x)GkKdO7dI+wZd()`**D+-BEk(S{8-Yv} 
z!Ge8As2B0D1OS<9tTzY`6VAuI1*u^s)_CTe&kJ6op=rBD7}6w zN(xkFtDUHLr}h&s*rX`MB#)MKIc>BZ176nTN9II13N0wa&EPSsx<6;jKSLTP>$ zUK;XVgVk#uQD+Q9;hDbD2;G)o~AT_(C^g!CJn0S9xK$yk-RMtJI$*SBZf)i`~(UXK7ZPN{4RA+O9$HYhOw-d^T;Z zMP2Sa6KO+r;{BlOoqEJ?nXAii+r6Gaj;|P z9Hoi!#h*dO!76vt!?8?ef;*l$4W?Glg8(clEH8D=ab=~-M1F$FQlfYKQIiBPR#cO4LZ6T^E8FX(1ukBO_q{3{8Db&(1A}uIAEnp(_K!t;f=M&RCz8tLO_{Q;Rz93U@Lo2~Zj24~h9$u+ogX zB6D+5ehr%zyC4Lqds1c~fEyZd8p8Ia7X1u-`}j^>4ArU?620ys~) zpNHsHTEiAjlveFO&>1$*vFtZ(wOENEzJu75FD7Wwn9&JG#M1Du0X=obfv%?4mV8GF zMdmExLM_oaPTlKLys;cdBovx9}Pi;LpXA%CkiBz;nwN9&KOdjIOLmZZRJ0-AZEy>6< z)Km^g2bVo%bW|M(b+ESB04U`p7$F_Am`h7$k<+cC_2U+-t{h_9sBbDA>KC~hY|n&3 z*#`x3t^=3!wSn}62!bE`|m8*gC@mDVWa?5Y=&8H=;*Lz}Gs`;@?gG#z99qs1fKQ8<>m2Tm|{ih0bTNen} zb;fnx@H8CRU>V@p#F^{N1WGJNt$-+Nl196GFwkB;X!NQ0zi7;FOK70uS;!|JZPErt zWn@)*DiX7yO$h)0NQH^nc}6SA5&KgO2qN{_%g|Lx47Q;}2e_1YmRiESi+HQlBQN0i zOqe`Qq)xFdv*F&yks~KgUG0OiPPhK6$E~CLS@%49y!|cOsKqG>#-(Y%MQ4HVmVRXV zPt!QrxTwbxBkW|2y*}Tf>#Mz8RgO**y>4x{eML3+5GF;cgaGuw9}|2AvyB??*Wk7H zgN0@u3W1y5v{Evzo;DnfbVgn|N0Dq}r*QpoLDCeyXI(w?R-HTXBY2Rg|15(jWbPg= zn-1_H*k>$aZU)!N$g$Y-A?N+SG#lDr=J=9lkrh815T`TUO`WS*{2Y$(+Ff@?ZK95( zig4Z9y`SEtVWk17Va;d*{Vdtk;J3f=_6Eyr`|7le;dtY|H5D_N6 zlVbRDp;e&VAU?!#EW$nrKdi6HfoFUvH(qQ7+Q2MM+6kHigwI8wCHmOh9I%a{X?l&? 
zth^6F#?r4KfZh(wwZ0xF&Cio>Mr2XEst=}L;m7ZsIPbuT(_SyZ5$RI`$m{14q zC7jOw9k&v}UWLk+a-cJHlr_kE&io%#E)+TdWpmAgS;H-k)oUZb9PAi=az|OrwET&G zO&_(>&XMTJqu8)1pSeUQCMHw*A3Tvz%gp>+75Z=%%aahkzYPd(9l%uiUBcfjzbWLz zp)p7Z+OEV0{+~;^@DU}&!chHNX`MX3Djivyc%cEigY zit-kZtpy{R#V?>9^pKR*?;KQxoc~yBXidQ`eZC9G4wJ=GBwaxG_^!Q#C+z^$C9es- zq`vjUPvyr4an_oN_vD&mkeIRZ8lpsCl1lAD!nL8N*)0Qw*Z^WmO(IAyB_sb?77r1Y za%ZiMR*Zg!>QMG2O96@iwHD zoUWHgE!v2m-vd~ z#%CPogRUUb)c%9RXs=^I1ht^da{`C2Y0C3JiM~JwJgR$f5z!Bwa)qEF1Gy~wLSQN{Rdd&Z&nl;*+*gpvARS8xAOLEzR-6n={ehji|yOsZ(DWei~= zU3ev76uchkaUcH5zw^SukdkOQSToA6%E~5m^Jl(f zSL4fooJ{p-x`G*k1hHPPN|6l<(`nJT1eMYfIYh?iH?>4;IT@4X57MOfwse=v(2Za} zI+AyO9)a{z?LA^cQYhd3w|VwCR#4`4yte+Y`@}tR5GRH|yxYLbOolmMb9zarsf~H0 zft@a*3w%JqP#7PGYI)7}MCy z0kAU#g8c8v=l48>w;b3;tPRk}WQM-KJ@#TWxg8AO>cmHqX81mshqHmbKZ^hj_Y$(& z7PrOxy4VOY0Bh|p=tglWvR2GRiu@0%yKMgWmTo4kEeDbNSU}Rz_1<>IHQ_@ogsI<% z5H!Z;WxIa6-0oZpn9XevO5ngw<043JpXuF>)8lO7qp&0OAM17RB%v3Ny`NJn{X>pop3medhO6l@oX4fHD`3h-QYbrpERh4+#{ryG3dnET;DwAYs!!$yNuTe zpRN%ibHpQq=d$5IIQ*Z)X!C1uW92HD$Eep#iZSIhU|ujrRJ&F=BUK=5_HmmW9ZXmB zLvNncU6rvHcO%>_SPIuIgs5TbEs@Nt7vo#yw(bMaPX}y`_8AebtTHTqJ{G~?`_(^@ ztZ=-lRR-(L?K@u)B2ZCulud3f#!!tqf7kO>_pQ5qnzk%|(AH_Ur3=tg_ZbgujjpLazoEd`*ug-W>FYrSJyQENaUKG%= z+g??+&s5g1Vj)c~_#rR=v_Ev70lI3#@~6}LhnzsEPpKtxdGSHDsf7|Fi3E!)bwe%A z3-`DC3IGXw)Q>T$6Sflkx-EUzI6&7Z>KWz@EaRs85`hHh#51IL(@0=>TjK5}(EbNK ze9_U`8Eu|I!$vl=nUfbN-c|CQYvS<8m0qL>*2x*BAbk*mqNM;UoG%s04RPU|a^srr zSX}nuP@n%@3j_#O9-zGH7Anko?mS_I1)yQs0kkFNe)r$h`cfF(N3X4+ARXOe2Q z=PCSxy1eTLlGME}ifIA6Qk`y5VQL<4!hF$5G9|vnDr9VZKfq$c5>&0JF9ti=Y;b59yY^|0|C zSmn}myTvJ32|ywHRF+CucZN6Nz3G249q0(4CW*q7L6nZes`ZkxR9&Q2)KHP2>dcfk zb?+szGLHKV#)o-$HRb)@dVi*2FPx2nV9Y&UX2UQPM;gU8I6%Q@`EyGI&ug zmJQAIM`CDVK9~lk!Ju`5(pJ7YCXDkkCLyMrTObDza*LeMO9Jo5i4;$0b%RaZ!aH!i z;7$EdqRzvH7#i40OKr4>Nm{CCLInMTOU){y=yc9+M$a)-E#d3gFGxshX;s}Uc~T~C zUOt3PEBpv~(;ddOhA%i09qb+`7n`_Yu<^oSPx^llm7)NofD;G3q{gZ%-9zVh7VF>b zq7~terI?P87RmM{KfAH4p}5{zZKJgpX;H#0!K~QXlBh})5g!6tGF3jm-d5$eDg0Ml?}et08Aecn8*|*v=Akv 
zcYnTcmBs6iwr$*g<^{n1rR1y(j)67I7t}vvR3Z3@%u-q2fra6BpckaizjvX2$$3EK zDG2d3?m8bHv5aJ`ZHe0(xlyF z?7EegDZ9QWJ!0szF63l2C*H0VzfMvyJTiF(j^emWq{N*Ep<%lWkuc0Lheo#@hNG5$ zK8ot6zpb5lSw#aqY3EGUi3`M$Q06TqwGX`UTQ&v#z#kdZAjUY`C=Wm+BD-0ccg8%m z`6-YwVPuR+avp>Hv}9m;P$l(XJd4B?Id^sNuCcAbUN!BM%&U99K||UN$HQnwJC1t8 zYuikV14L_1+;8I~-Mi??c+$+}{jNDE!7RFZ%iDA)wFF>O6f#sgRaC-r7)-(lS__1g zJacM^HtJYluHt<;Q9tE2WHH-%vI}a;)*)AuoUjkCL(9~^9FGS;`B~XK3YHnKUx$RR z_F}5xJTAK=48TV)T!i&{SOzf`%aHsHx*EObWC+n^VE2hRFvko_tY5x+i=4^M>**XB zSKbC#fdK_C&mh`Rfl0{+#+U$t?LFpRTBN}!)qMrDuK9b2Sh0rS1!r*=33W!Vsx{pA zce1s;Ue#Z@d6q%HwavPM2=m60OuJfJi$!LtC2Rt2okldq*UY0y)O`tzFyyyt%Pk zCFKquC~;9;R#KV&M;xexZydV&AWSW$G_srNb@15jiOIXAzm^!eus5rS(W8{Bh)t5y{EYZjLGFNW+ zvl-ympPyhQ!+R;7LMxcTnDXRR&4EK=4^+{YbWbF6& z%R12+W>x68K^?i8{E8lC#hAsFq)Fg13pZI>i{8*V{ulgPl#~V{UVaziofYE;iB>&DHw@50`On3r|UBKnPB>dSK&@=DqT zvhvWQ=oTayDc~OXeFU!0ES|o}8DqblaN|=X5ib3^7h1gR{pC%;RhSbyD0kcHi2@n8 zE})4N&f)-W06LeTe0oq-g;r~wRyJMEIYweOtihU;T{+tTvRHCCp;_>8TJwM&h0ar~+4piB*gvar(xnzFoUjkAQKCzqPAkV(t!(i+Uy=FW&1G zY~m{V_#zpZg;h()IX3FM{eWE+Nd8v;NknAl{lB?%!O^ByxtlNxiuvNMd}g?94!;iN zX$qa3E0;(e=IiBzgxAIHPaXoU$xHM~^q*1vx@o@14!-q@;?iU=pUWa7sOlhIs5TGp zfM$KDczC`aVN{qwH3i6TNr~GCwma>QkFv`s%AE+jB6P)}V5<|Z*13i!?OmVEnZ1eySz*Z7pZ$ve~V2?gt->TWfYvyVchSo{1v@&h%FQ#EV1z zsW&gYFeJSEpjZ@UW0aLWH%eB({d_Wu5qUtuEMlH~0Lx_DKxeX5l8GZtrp2ScbGt(n zXp*A+c8?YRha-k?S_oD6ayub&;(X|Aj|Yr`(|B=UlQ->^Z04exV*eVl_kHT};n69A zQaypyQ;n-GFid6mcw0d7$wXvD{1a+JBU&e2#9pf8Een`|Ia=z>HYZ^^H`%6|%{ z?yA9r1=5mf3B;Zy$hOY$HS!V+Tws=e;I}Udlihqh^l0m&!|D)77DF*oADpxS+#j}V zm(rl@CNwD@;&u$a2BEZUh}iRduXBGboj#BVijn}u{F>E(o@UAR-BGfGIkZCga*A2H zei@;_hFdY~Qd%2dd>eO{TyW$xf^CpPpbGvtpz^S-Q7j%N4b1062z?u)B|>?bVv~8| zAs1zvOu4Wltso{s8`DY5Nir%5nSN&7N=@&&J?p#aWhhnQ7AF}_ccfCY2b?+4@{VSf zbYGS8qbU?aqa;^9bwj1MMCLh&^HVDk913s<&()4M6Ri-c6o$8ESh$k)GQwT4^D{PneZiAZ^SutV4$h6-s((-~+wp zGbP-9K#I%>11cKT7Hfu$8FAJ8*{b=?t|><+I$sU9DO2_~<<|!l;uS~v(7vCrj(zqC zh7;Ow=8zBaWn6UJM3!rFHM2_4D)PzC}~xv3YAqDJ0x>_YTV z)!Q#Wp7*Jx^2GkRqc>7&h9eJjTot64_nn$t=Ii@34rDgMH-!@Uyad+g 
z!1A4er3HMu9{4>9B7h_|_UONdIr7<3E3Ri)ckFjwoI)&pN*jEXG*r*A#tlWWiJvL|Ske8`BLMN2!^nufRuo@;u)zkzxif6_C3oBI{n>x4hUi6fiSOTp2XG1SDv(w zhGA^%ht-hJS>^(jEu<7eov_&jq;PP0tsz?xh@zHhv@tq-7Z_z9t?tLMT7z5`M&Qfo5^C?AP%Ji zFF~pRYFQPQC*8v>6c=bmXX_|)?GqVc23H|43E(A@=@Vbnsir4N#;}=qfv&3|3gNTW zt-BnhnjItx5f6Cdi>TFg9fI@7?d_tX0Yyg29*E4QJw0d>OJ5;(DR`Ku`*HhWvUq3` zP_1d4^;|suO;GRHuEC>00or8t|MiYqoYuy@WnMss33T_J+om2)%xq^HD65o*jAgK= zoLd>WZ?w@>1Nc44uEQPxB|zH0btVrmF(b4lMxRp#hFgGio0Q!el1<$DgW z?Lcpl4lVZgag$m|PG~sT(SQh|9-U)I{HX%QSQPeNE8#uU{SGy{N-Bot zXZFY8bA;ChhEwz-|j}t0f>k5I*aT(l#!}F*IFmB8;i`cGpJxx$&2WK+d z?)p5_P^49;pa`qc+}9jGi%rRGt)IY$80CyM&%xiXxt=v{dxWM64Hqn2R4RTUoRo=? zvPa(18R08}jOH0(fs?XHt2^PpVZv((=|~kVPhUJS#TfqA^+o34My-2>JZK9%K~tm4 z?VX1;QKAnpn?hDQn!=`zAQkbYO0}}ajZb;8tum8>xfgM=u*GB{g{+qE|3JsPmjn1Z9Y`qA!c7C^F}^yTfe~hJ(hH7@9RYm(@z8rutQp+*quCwQq_L)vz^r8J(i9@CwKoa(9`^|*dZ zjCV_R2A&C~ptphRaUpCP0nc>er|u+hRKxdF?3|BB*1(HB=KL}C3!I(?TFdSQ-vOY; zlWius`U9Qo6PI$hy5Xf-_I)h>252vfYvgQ8{*qc*+Q9Fdhpl##yO259unl|;^#G77 zjH*G@h^W@k$ew73)R`avGTlb1YB>}LwbqdgWibP`^v4K=2ITcI^ zC*G7uR!Aa5!^jCyFr{abdnER~$Aa^l_8pfp3P20kS@nd7Bk^pGQaRS0S8Wmd7mOrX z=tzgmJcgz(q+yD=zFH=MvWH#!AgoR>;jj$sU(#e;8dvUq5omb-t?AR;gX(>|wmSzL zq#eH7bE$=G>rtSEK+&Fq2ro8GX5xz_1pWX#iLD$V-obeRe3~S)_E1tha~q%IS_*x> zu#Y5C362-&axJi~o=}Qy=|1yK0nbic=w%tVGcD7^Txf%FiU(~Ej#7M%SE;?i(DogF z@ih!Fj_x8uC@DM^0~)*K#a6Rpxe0!R9qYm zbh5ZD_A4~V_%|fG<%QcgJ2{5?1d|ogmNkyLOu(>X`K!tn1R_Kd9{!9N{hB zHj1y~1y4Wekzfxeme5mB=Zc2VFLSTnXH&NZvk3!#R5pM_#P@y5P8jAD~|gzB8PPIC%#ji zchJSl1FD!4DJ?)#nt=FU>q`)D9>^DFl;8@fB`w3~->Xa+D+q9=`(QpqPShH83S1mU zoXpUpRjEgVq_b=d5($;qH?Sxgkjcs@{L#BJvdExj5*f&=k&;&DZE18P2UQHc=>0reRTn3-#t?6MbTN1{BIfb^6=knin;86 zqy^Vu9izK3+|OA zAL(@VTM?fny#)5hf&0qryO&QGW5Boyj1{@+bx%$aO3#IXuc)%{#^izb+n{p#^1c4iG`A}fOHlyQ6zw*sKz1c_~uxS<5YK< z+tiH_zU#8a(=e5eYV3YYT^8f5YCxNHGbE%#)Yrs3CK8Ny`jlH?{gR&yt{VWkYBtSF z(!i}MgCPjLBE)mRWztfT`5*FM^AGx;Hk1%6a)k*48I(!$uWc_OY*4;zzc{`euzk?L zU^Tdi(fA)Um`v<5n~LI}FIIRr`5)=iW&#~0%%hOcZto3{Q&4hWhMdhOM~zs~La`Kl zSbO~C+* 
zCgrAA^u|-f*1^{;_z;iK2Q^3hRIiAdHK)g-MnNvvhy>i)FlUN9WI4&q-;-Dn>}K;O zmw`AoZ%yhYDUYZCfbzT4W#42B3{uB?Q!~|Kgp4GfBP%8mM~)Kt6#tp69B9Zi6~!$i z=#v50RckRmY09C#;@9h+>zc;&jAXJ)kF@q$*1TvW#0dEO-PBK1$2al1{2>)7XpbLM z=nCt?s<0g}50J&Bcb<-9X}Mr$!w1(1qy}DD`AU|clOLNlE!wcMsT=6d{zV+xcfA*Y7qO-f(79;itbW5r(-UO4oL+yS zjtL$v$wOmSnCD>s3}hIQWdF3F7FYeP%qv?iq73vccJ$ouu%q6fNnDk(Fj+rz|usCO@{a} zKC!>Do2lhk^9gpQ|7m#U?uWK9NPOYfwt?Q=9VgfJBL%P4b>9L3f2G%BB{Q%P@AAW1 z-!)^po<3yf-J;;D^3ZSzdiG7IoPXS87|>AC--~OO8o#HPz80Mz_(L9Mxo0jo%2$aPNcW$mP(!siUn*6Nm7M zugLu_Ox@bA70{Vk8Dd8590eiWD@TOc3*&mv`$j z0b^n~UR5tsV|s%GY$}=%sh%#ZEO&vwbG~Pfye32noL%FnLfG%5Q-zY38N&WsTzbR* z0;6IcGa{zu$->EOs)>mzvGxgsTrqqOvF@9xICZnJBRQc+%-u27qLCr^VTnV%<<0J* z&hk#N_-oeK$%Ew|=uD8&o=GPlzFH)*HkTM3W|{DEe0a+}=$-ZReD@sV5C(H{Iik(+ ze4sTj3F`>=KxcUnyFoXspJnpbZ{Oc@63s8JFc;cLA5kN7`VsMyWCW@P)T2cacwWjFctJk*ZgupR?aa4@oA-VQt~41G5w@n z#=W9*p3QI%zP3C)73dLPp;ym=1PTE(v*q8l$X6p~)Z}Ld-#FYBuhG`8gLPa`-r?;Y z1D`SMpsAgEv*7C*WMtW5?>Qy>#k~pK#@L|5oFX)Y7czA*+RRSCYcI}`Y9)jX(%E}d z(TLXycH2b260h?5$^pqin0$Y^v?ygHUiO#yf7ph$ukVD*13R~k(YVB0lGt2?tj`21V-Z5@I(fVBd{-i}1J$BVNxyBk?n zg}w_Ps09Z*-eatQhHM#xQ8d}LmY8*Jt{)Zh!V@saFE=?Ix3y{Ip)2JO1u}rWJfA*G z0$0t~_9JW9`D+)|M6&MMz}64(@?u7bUwYj@E7&*m=GOiRFp9lB@u!F`d{@QX_7N^r zzMvP`JB*TR(NhPD`!H4f*FeB?u>q=+yNfM{eFoN`fD>+u!ZvqN3j?1!)% zc9D7i!Ja{(d^SA{pFR$s6!CV5kFj0+J3&pBt%z_uo+e^$NpF$R>aSdYu@{!TI2qjg z_&)OhLk7>bhk_LAJVO5=rZbD$TW%Cd5c#F`a5XTH3BYfOX8h{U&0bcm2$&WnV9jr0 zIj?W*HAK$`U*4F3=qFmwj$EQBv@IhrsZjDF!Ge%4Vi&;6;nCk$XZ;MXb&>R>*oU9@ z&5Zd-oq@pvg*Wj)n>!zqjB&Gvi$J8@EwSgf=$q~`3AG8$?>eNXS&j+i=%X$*>!vGD zETOs5hVdMByk5INu5q1DeR6cAG6d+M?5A-li7e#t3nA6;_B>unQ`=BDtJK2Zh^0Vn zL(rBW96GggctPzqAd2E>1eA_=_^inoWS(JM;=P@T<1Zy{YcW+2eZcPG&-(C1f z>)L}bF+>(C!z)H5L^Zy`JNC9GKuL?ET?;?L~*|uG#v*`ox&v-x_6ROR-@G{o_L8 z-~yqX{n;kxa#sw$x}Xw0H!!g*l{QTX``v;lDt;9y>aCl^(>tQ@Pb<}m6zKTudkkM} zEjNg}S!hmYjo>2KfZsUul~zGdpprM#dEYa{77=f`yG%Q{{s zN!h@0!jKPe_b^#fXqTvb33WdUB9uXlFlJSgz~z$SWc#QK3!+V~>uRglJW+GU#@dBR zDBWV(VxA2J;;h}~Y`k4Udyj6YP==3wo~S2kT_(Da*w4w8>sodFPl)xgkiMWHkh=EK 
zf^=AB4mT_xfrVaohBi8s{K+&ayybo-n=$acJ7Q8Ny^0_}lb{QM>4rQFpuio{Vv`AA zoR~JuE@!p^aGDXULIcXcvErLX`%6q_#_Bbfu=+#((B*_VHk!gpjwtoLy5?bT!Rdy?8*l0*=3sJbTh<*d;@_=nk0RU@$MXVSi*aIXAKgA6 zf^Rl1FEMHn6gyvHGDKp$oHpC68^}?$C}Q4N(SC)0NeaV`w*)Y!HTN`e>|S8a3~mpm zJYi6`7CrXhfxbF>^l#BgAdolR23UQ@s2eEZwYnOF?>X};4KQ^BL5Jq!6HRc+hJ=0% z`)zr5k99{o_wWcZY1!W6a|MTi6y-O#?wUD!Gn=;m#k^L0tTwbxxno7~Ml@_rV2-W7 zln9)cqIFaDW(DxNJR=Jur~LwkMM0m+e+hMBbnnz-H1uj_5%B2?_f9b@80Qq;mE1 zL4mMlzi$osI5uK=M5HRuiowY5^!Y|>RNZQVwEGHa1#wMGVs!bghJl@IRB~gp?c}&P z#Z~zf!o}-goNSN3s#&vyC1>^4BX}ru=3l?G@ln%6yckW-JkUw-9Ff_ahZ&(aQg*fF z3J9lJh2!f*m4qImhYWhFBEaR&rG}}B4ul#Lzg6PuZ%j1kqD{EjlCLzUW=e> z8cWt7@G`%1xL{_=+59POS(+93je4>a4Y%jTuB#`ca-~R{hyPn8Z^t%9$qTHgUFi8G zWmwMwa>#>nf7+y6wfZl~M5ic=U&UH3{J(&>=c+K$ZPu_kEmzN)tVh-+0)@Mk_p6?#tc%ZTXGv1y7L5Mj96D< z+amrb0&DlRZx%~Ol$Bd(Gm3h`lF6PQoQhSr6rh@MC_i9JGLm%4OPh#&43;{IY%1!k z6Pl;EvU0mEfqw4oaB_%zch*SfJ4pq+nLXM2xK8-xlOe27)_`+25JBs3VR&6Lf%;z=v`PyoEF!tRU#6%aD6Lnb(oJdme>V-= z2K!<_{N!}ZyU6-C(*rg<19o2#N-6+tc@v46G`7SJ;!$udOSq#Ka=tH%pQKr+p&+-D zg5)SX-YCQT<)lfv;dTO0r0wOiyMHK3Bfd)YOk}@5o(9OlJ$b-i`L+2jx%AA)Umg>I zHbFw63Lk)3E>pTNPl`%=d={$i{F1fzEhB;|R>VSh9#~ooTXzbgP>o+|*`Y;KcGpFI zZ#Hk!CC&8&h8eN^0MBwdex6wZrgOZUl>KnwlA{)c?&fhDbq#JwoW$MxEDD;fl*eBM zsY`tBx+vk*2DMQJpOf3-nC5c)2q~VXg4K+;b9IUPH|Ao-fDl81qkogA3G|;c9ocT) zu=~0nWQ$3Pk;+3KhoaKyCsW$wOvx|t3~%~j^?wrin50E^N)bWTc$7xlTY;T{(Jj`Q zW#j3dWX-|#2p^>2A0JWeT8ZZON$Jmh%N~?ghXLpNCZn=4zCs7r1_CFcNOC>mn-TAj z3U-%5p50Ke{SZdFAu4&ZoQ^5B(;0dV`E>WI;B45ke(b>F-jE&%v`@DFzsN}Pv1V(e zWvBXk)t`o+u`Yx5`jKWY?O0$~Brmjz>^TQEqcW+`K_-?NH~2x2OH+l*HFd>(8c(B{ z$A1HMirUk6jT+poM&}_*9b#^e=fBDOZb;v5xM_B`2>Rc^#qPNzzu&m zoH6>Dd#dcPx{&fYU|%~Qifa2`-KR1_+>V*JmIAQ8O$k%9XD|^jO70N7zA;!+41{2? 
z%E8|h=-}NJmBZGCc0_$aEPmfLZ!;0)S-s)|At^Vb#phytF2Xg*KJ)3#O&1Z@A|z2B zMd)SW@$f2Hu}{FD=by`y@E%Drt_=R^le!M=gB9hYn@n>sIjW-6czOKSqx;?W>DCH> ze?4QVLuzofhZaz%V7L_S=$Y#QJ5sI5(e6J8Tzm+1_*f4w0gUqLON43 z8RfeVBU4>G7Xx(%!JY$e-I%)=q&7Yjk!$$&aAqaYRO6uC&2WXbD1<|zi-S5!O*~nh zb!#@hcM|aRMP<`!u%6PeU0H_zK=!&qB>n^K0{A16~o&`1|2%sIZ9ua z^)yGMdlSY|gSBg=<~JD!E$}L#Hvd9AQ+EcD&F^u9sEV#bz6d#gIE5&yRQDM(>Rg{W zvaX#zT3pEM7mtK@3W{Nn)Tte}hM>3cc5)2Brz06nTZ8G<*SIS9a|#iZw4teR{7pL) z6Pn$cen2g4OlgtC)|+jGN;T8E{s^CNgCY*5EqTy4Q>@vslWdGPl@ZrGjQA7WCOKM3S=&>uA}?G@%PkK4 z3*SOD%F0Aw4q)oliv(<(b5`Yqx4W8p{5Rz{2sL%-+5mQEW9KmZ33#m#BnUbH47?go zl;?cV`&grMC*82bRFE%S3(9l*(-5VH<;RHCR+5D$T8*24pR9Y4g*~-7+p+$Sm1r|) zqnURV4&XgtF<=MkM4ScL_4%G07t*^5YySCX7D&erZ|Z;(@R>@I&7n4-5QGBbIB|$) z4_4)F;oKFL>9PO!eb5*PAq=^z9=`Xt5^3ZiE4OEZkHMt~5YOaupC7pN{S>j*NqW!P z>%^PQAkcF9W4B=$#B&A}pCTZc(La8uY2A@x_H5yQ)=`?9MlZ0V#C~i+_ zpta2JgC#F9TStfLmX8uKUwpjq?C)bBFNp;AC{4k>3EEGsn1>QJ^QeWbqGK@A<<5Mz zAJvS}x@ye|bMM3xrO01re{|VuN>}00VEMw{6`A*1oJjZ8$Hb_KsNNWWz}kXUOWH#J zX`sX(L4kSck)I1kottm%@qNjjr==1N`%$Df=*9W)GPas6u?;_0=@Y=sv z+dR*>nu1(_{OA{c$j2sb;k-}j=AUZaLUtK&f_+7BTq0T7E^RxOB^-G$1?**B6?o{!odmY*EcOSs^<7i2SDQz@cL|BU#j`I&E?H zxtoRGWKpj&#>DWV4~OgPlIRC7^YqTz9A-nS35+V5xxvXP=W+oClp)MrfC%^N>A5>} z?oP*0Sk3rdQ3PeU5gp z{cay8Q9_VP@5t1xhqBr5DX7%7IFOHrM9H^ZMX27tEa?ph@$ij1zd&}+ zqHLniJJF>S!#f?gfqU8PPU9r|w@+W0nX%8rS`Wp-+WlFmDkb$T`uAY}ZYmGHT#A`7 z%0OZwZ9qvXVo}Rr#>;-Onk|TxXaFb;z*x$N*L@*Wl%wNQiB&(Y7$w1)3GD&R&JriL zP(4(g2{~0a8vw_fAwqzGc;WLc;*sN$-(4em#*!`~nnJP29F{S6e)%CPUY2)C%g1;- zLBXD&t@C+psKi~Q-eAd)0TDH#QauV07em>qq1a|LIib^;FEg=&t zQ5E~dBYL=qUc@D90p!a+E(;*sRddxIpN-*9S#=i($e%(t%;DH;5s;85IM|x*x;{T< z*s5g+Ucp>{XHSMW7cAG!?TESrx(2cI|L6p&b*mS9ljeY_d%;0PL{8ycLL(ZLO!l$Q z7BMX*VMC?Zf3C7n_a;yqk@#lbC=9oVu^o(`;e$#SD(p!3cn4PIZ|?8GEZr6SIaX_& zeuVL0WX+4T=mi!~fQu0LO@*vFIHW*C1f{FDSVp>L3V55@&o&meqrZ(E&vnb3qAQX$ zd~eLzLl869F|Jg)&{gk1bNTeUtd9|HT`fg>u1l%~@5TJcs$&YClafUBRdGIOz%;qV z$KRbAiIcaK7Jn#}t{e}x9ibHV`gRTmEoGFR``2Kkc=@=3_OjZ(9ur%>{9dg~bsx2z 
z0p99WZ_Max+9uJ_~17wg!ZZEDX&k~#aK*EuJ$)2RurNCEIL1}ZWH!4yJ(v0); zPths&MD@El6!^TE>$vEU8!?XZboqLfNomm%`8*z5K5wA@P}bS>5Tpk~eA|{}4w#zj zNBpwJ{xl~>@^AZ|vQj+c*Y(VAIBSgioUPpXUYSz>7U%;$cSl8sL`5$Cy4hSd8MtFb zju>dS333A=pX%$;oIp$#Kx-YW!3AxOODxoGK#SWTYo;x!LD45BZRBe(Eb_x z>??(#GnPdE#J1DGg3<_M5Lq@dg-1GC6$EuwAZy$adeeKco}R;~J^HR|5gY1#7JN^` z830#E-4!bufcNrC&O2CjC4h&?Pciwuo)W(RCXFt)HRteQ%=F;%61LRnouB=T zMwn$9$}LHQ(1}0E7`dRDNMY^@w*d~a3*OOEwQq?igVA>gpMe&)J)s*J5&&I&UvUch z!!otj4&v^H4YGlx7(3&7_1^?C!V-=MT^aV1E%f;3)B)KuPKpNT?~q&&&JS>(p8gBsMD4yQD3GEfKhUT<=bwDi40{)*1t#klWt>h)o;$Q*^&O+ zTu`4iLeX*+n8?^K4JQ>$3}%sy4KqM<6=*+=Zpuj);>J+>^bLr z(+q~*kV6EeO|QH~6re)4h?(sq4Av z2$=X7_MUb9=tZWnKCQE-xIMDj9hQ5Z2>sKD71s9HBc+M%{)?~%ps$#_R6B&T;J7=B zD97)`#11yAX6cF`ra8I@30}XGe9;-1wwbMMxP%9dlEB?FuT9h~hjYDmP))2-K#FM8 z<-gm(DHGCFbO0C$(W6?dJ5mfORH=4w=RqkPC!CiIQ7d+66l46gB1p84AFkFc%SI7V z1OG<9+tuiR!iG1P&6bc&Wi=ZCb2h6|C07>0d)D*eKK0xc=+y@e^WwE6n&=*pFNz+! zDMFC$Z@p{3!2<*CqRXthPyg);G%-kQ?V9UtQ|Mr+k>FAQy3Ie4!5QVve`DiSI|t?R zEe;2h_iT3x3it(IHK93Tl^@?EQ;Veno zB{#W#wN1Ms?!p{+a~_3;071iv*!E7H*fDraC?K$UIek7bL@Gt(4CKLhi>Ue&WxaKD zyoC|xdJktrjgYu#qC==vPdc$HA`N)>?s#yFJr7!6>znrVy(?_Q|TV{V|m=aJ)!pb`y z8V6D&cXhz*P%4a1r=is_Mnt8)<}MK7fmM%hW2cpw>1B)5Zo^u%AvTd4ymF05W6bsk zD2TY->+>1zKmwx=ek(EI$h4r^`2rc{wwJv3rAXFFNF~h1CEPq0@J}&(mh<#tEuMh0 zq5|j)#n8q+YkfXdkP?3YR^Z;@T#l*_9Rxs*&9Z~xOT3_R*rnE)AQD zZZ~CA2WY@4V|2TnGe=Z}8c<;b%Dq9G>fM1NN4Um4IJ#guUMg9LxIEdK0VXJ%1oA$n zT{b$eyxcAr?S?cYmR4W|-}0(=4I>TdvX|82TVCW%CCe^u?3Y^ht>LdDGfXb=y?J3?20Mv9-x`Q&=%FkPOWvJD_(aYk%8Zg)m)|EJM7S9t<<|H(`>) zf1&LAM+U`+-hL^)+(}iVkVaHoJS~~+&-nuM3~%9f6a&iC3dW;Ys(CBUqBDJp<&)XM z$0+=VyGMRG6YFe1G)&`6pXnC|)LaxE+>p8&c2V>FqjSL}^PY-w1)5m{gAUdg)Wt@; zNloXE6dTPM`Nx3B8NP~>zp5FhK#A?>sW#VtNg$-QS1a*ICf;h#t)i5>#tW-|9Lzcf zw}0?;5s~Y@tbkDTc})&0P)>FJ2;ZHb=OM1-Mr_NPZcYHnT<4-plx+*=eBE-td=ag3 zGx)(c+5Ah}FSaWwJf&%d?0vi5WI;ky!1yO^Qx;K%4`RjQSi3Amnl`ZFPX1oRA^IMH zY27whB|`7v#OHJ^1Gk$l^?I%t)cPAcw2r*V;X>NXRRo~ zt^sT);~Bnh6g`XG*Z74a2imXo7wz>f9nPZI5Xh`<+nRTT&mUXyc44VO*jz_vBRbg5 
zi$Mh<>LYUq3W1C}`7(M+^JSQ>6LAU-)EuSt!#8OaX<4O_H-trqDIkcd>@OM&`Uk03 zRpH@}LQ=FfR_ztuOixYditxKPKW~a((@^IdzThRNM z{Du|&8W*jJPboHqGv)hCDnZ4qoH4!f@hjf$`g7(L?DY2_zOMPIZTGiMtQ2mwRnFo? zX#q_!vEPpfYyvxL=MJ8F=>eAU`u%XyH0{RtP&`nqy#aJ?QS(O`mx=Z7TS}gG*qamQ zu$Qe^9{4~X7W9E4g;#f9UOUm}{3k>hTrjR0S{VV*MS9rqQDyz)$yN(=sz9eQLC>Qd z?u{^&nT~U60$4GO#gdrtcPO)MO=FT`YH}aKB*W~2_hgQ9gtpztGLK_Spg-!O7MXAP zq2KwluOp(qVg!e?ih)`3WP+`zc+e6V9~IT$0Z*KM*oe~jPao35`6jEw9-ZoGnGxl9 z>&mMF0uVTQ)p)2;2YbdF`u!iX(9Mba7Zp5%!fH9f{UHR{(KHxs^!G!vc0 z!NNo3RLeWLSxDn(9pN87)cSdFjyd0Uh$zkwmHDrD$4MJk$_Tb16>+CYqazV!GAe>u z>aq2`#{k|1zqW7;mlgNi?Ofl11&LHZpV6zuNITjOc(39SAPvs(XNn7VHpv3C#MIK> zK<(BH`?KyfvIa9zie*VM$V;LRHnw>?zKJtp`O9lSknBc^fyj0-b%e-kh9Vo)|pk~5KMHwkUI?4iJ<3q(O%xe~yx z+8ilk@cl=uDY)YWE}%Z!>g?-v2bULZkJ2NMZOdjmcV`#J0xjmZ*tL4^s2$*QoPVQ$ z+Bzb0Z#vD8{fZn?u$7TVY;Ia(nFt9-U^QlNR!jX;+6eS74r>((fyT2I58;a8au+CD zMJ_eni7>Dt^fCt9yRplLLs`K5vR$cI7-YvO~oPo)tDA>HgbKye2@? zcULy}I@&s@Y@Ep_S7e}Ww%#L8?SVKRWaK)U7eDoS&--Y;Eq3*g0Oh{-u^Y>`pu{Oa z(rPqN{Zy9i7L>st3A+dEl{Sq7k1c3vY=v!4arC_J<)%U?_nfO^laS0FbIJPz8+dJ> z_Y+C_^>_e%nKJ<75GWG$@hQ^96GxvPA??Zv+^rJmJpw5p^Fj$-_`rr{q9I}9Fgzul zdFot1=^~G-_k`Dt3TZdZEK;v)vUlgVRmHicFL6>P&qzOh9RUYcm5kS3NZpxNw=jR{?7$TRhjf-_1Z^8~}ChWoVEVL?cj@j%3gM z5zE}syTToE^&GK=b1^wmk(eTO6*@3?&cn~&rz^t^xx(UD0c3*+(+}4Qut*Sk0t7=Q zKE+IuF>#8~?xQ(h;UB3^j%RosOcrNpB2Jxoft5k7uzKpz4XR|R%He>~LzkbDR&2`1 z!SXg|2d^iZjb-7;Mm4=kh8WA?S)am%2AWkf8MYdFuG+1a`kPbGAE3mKyzfSSUZV>h zd+N_%taoUz-h`06gcoi%_Q8xQhb@=I%%kH204a2QzdM6OH2%u!rp+LGDPAsyPnXt5 z%EZKdp}17Og;vx+hq<{Jei-&6D&@h^P3Zh?lpfiaP7Ebxb_Q@IIj?9tpHBAfynYw| z*9Z)e3GZI$4(eto&b|?F@G7YFfdaGr=$`s=yTJf4!n7FHeuMU+|ITPlP_mPjTor10 zzeS_%#<(FGDiUH0&v8G~hBfCfM*?V4D^}KgaYvQ$yW?cOEQ!Hp9u* zP?*UQTTaO+(>(b-2r)u2=LK$BrNI{jkjCeAy&pOn>WuNNqe%WWddqN4#PL1VgmplSD?(JAq#Y3KpD_Ui=`3;vzGymxP>lYcTr4;&qHY(OjYo#L?EmdV@xPxMda6T z3R}a4dQkMnk{*TiiMxlaT60rkV5cC(uZPV` zl8G|94Of54FF4;P@gx^XMf9h?+SnGlg(rYnGF}u<6Y#v2-w;XZls()m@D zD0y^oga@Zn#EZfEf0pH&iTkFT0zJ@_`Sb1An}HboR))y{Xwv*3AP>hc_=)mgt^vhI 
zHmA%qemT)5kCY{YLZ~K+uBc!bq(Mfe(xL}zrAnMFV)b_YH4G;J5w|_do_@w#TVmdU zKQ_u#ilsa&WVa*yo7?lR@jeqx+yxm&S1Yp#K$1x>K>pxmU!Qqb&G_Fu(8<;gzNR6> zUD6l^rf)WjapQq)x~o&bpVA-KhWF%>MBMwbiIK0-Cw{nm~;of&XPl<_gB%t#K8$xZuNVwRPZ;T$I&O~OVG<$$4%J3Gf? zlMxn|>pbe5TC%1mbEgZSW*>|PQG%jh z5^*}@I8Qy@2~4Pr>9qP6+|cmhl~ZA-#TIaM35WfC{A*QGk}nsBnBv)FRQI;xxF>vL z+ITRQ#}MJDMZ>dYp9-ljCc(!XC!1JKv~t!EZu0apHsPu!tUk+Nib1E{%78Fw4V_<< zG?0iY$H_QvY1q;~upHG~zFmzoo~eOF^Fi_WvPbZ?$bxR+OV6@SQLz^y*C&~pn4zam zw6~)dIGb?tC(#k^6CQ1Br*!{fENg|@OdE1z3W6fODZOsJ*Md>UD(1#z3kIB0veoX^ zMJh9I_eu{1uV$IGxH*8ipi+GK8GjuPCeDAE=Bd=QSl3^?&;8@Eh{grV=WbfxoSb-Z z*u`&sTzWT}A)1Q6by-ZFMUho%)(L&zHAnND>w$>d<4C;ajvYv=hz>?DlXle1;(?Wj zjF~X@WpLylBtX?xqzcEB{vV9>Uut;v|K%Y+pYE3te7N&5lTD|fOt8i?3-JJ2S0H4y z?_8(W`^V%Y^G96nXAwA^0sX_Zam?9H46+XFR4w z9&SzG=AOCxzgdG zdhd#zCl@O)^=TKw-K*uWmzOdY?u?8()$I1iE=H9!^!&e{r&bNk@-k(F>+g$)N6Xkk zyr~hDQuB~P2=qu1;y0mcmJePrQq%eeTq8H*;sh%Ghvsi zpBO1kufN2nD~Ne9fpWC<&?oV}f_2VENwyooXYfJ;a1W(z*XCWD^!r+CRf%)MI3JW8 zkFx9m52l%;(<|RYH2JE-c+J`7fh%oL?18DsZqxGRJ)ITie0#<773L4&B?U>v_Igyp|#MZJc7qXc6=mBzvlD z`@b%YRs#^cNI1(ICb(`(P8?|aS$-7)z3j~-8cROT-m&HGjAW3>$2rUI`jDTvsNXI70iapi^pStfFgSHhe^s!NQlX%lD=%o2#_R8VFFfp>0lt*U#vsX z!HACg0o?%&PwpA@CynyglK)ZfDohCrt~^*C(p;Trj~&g_@l%;=u3X*+fB=m3Sb@CpZOLZa;I z^zd>pt%G%Wi=DS^w(gvzqc;tl|3VTB6K!-UGp&9gGJsM$Fb=iw;7HS);x969RtE66 zgsehmjt3QjUoNdLP2gyj(%lXnWP2Us${;K0-ygKzZ)Iil98P&w^XD!3@p8KZ)QFy{ zcUA%A=?>8ay@PDrt;_2CPXdUJGWC|q=anO4X+B(c2)nx zM~SBxyP@zVR-oJJ%&0GsK7s0x8cnZ|b)7$oTmqtjiLgaLr>^ZNXRlW8`<%_5^n5qu zJR6r_8vzl~rerSY@v`+v7WaZ+zj-Aa#1OX(>`^V&SR2p4r{db-#4#XG-mY@f(3`?& zAMGabgnC2+)IbG0yFOiQjxa}=0e#+G7yQMP!WJlw+~M?CYx}8}33jP&+OI|1MZCvw zgIId=OmMws19YfuSRN7}K)s{8%6&2>)%_!y$82S*D*UZX8RewSdy2DI-$F`}WVmC! 
z=GURz5ew?5l~v|N=zUh4!jq~D2=zX?xYHV=kC7)$wAD+b749#Bx`@FUTSC5iqz%-C zhyt{GHtKknSd9y|or&*1A^>u92DH{2tq8kzPpHmug2jZrlv|q^WB{KtSuhQeiT zKB9B}v-!TENI&mEH!Vx72aK`?VplS5suXwMwJd_lKVYDbrHo~+#Eih>4rpUA&AG~S z79&!N<-9}aO6VcMpXNaDUyd=oteN~}j(a6@di4C&k-Jk+iKI5gz4^hC1{bgS-DwyWt2SJP z5e`=21f=4zJ7EwF?D8}~oys}Q!->Y5-c}MZSEi{{AD_E9R&K%W^CPx#6Grr?M<(bl zKlumlI%INGsweobrvKqa>q^6qbtO)sxD|Pd=@n*PSk?26ujvX-qD8NoS0tD&ZE`lz z=1=pyH99>2SM8yYN#;e<>ArS6nQqG=I30C)je9L2Od_4;!cuV;SyCpMLDanX3y3Za z4ow0!@T=eLM(WHsU&7m9!etk2&7BrE2J!mq97$z@ z2osiiOZtuNCqufrYMO z1KRIhLH=_G^^^Qbmxs<*To!ja<(^nKHCB$UicUc}>>Zf)TO5r^34P?Y*g*Vn;ownS z|6w=&-~!pu4lxvzGDMwF;4q%Z$;skodWxiAf&N*^siS}TgP_;_i-^|}0k5U+dd|Pr z_|=3M{awVBgQ$H)%BZ6=}d5wMH_7?Gy5?7C&{Q z{x?}xVW}{qchPqV0Vi6uH^TLhBQ5*AjifW9GX!ns-`LD#Ax`&110$GQ`YBaGHV*I% zmyT?)v9{k>$5^B82_Cau%oQ$_Yw4Y>P<3we?O$Yk%5{DIqs9@*Db&LEjmyKa9cPE` zTukzFwUPO;9=JXw{wIg^eE->=^MwA^kWdprV6Lg*YGF9sD!eRY<-Au$tW|rX4c~o3 zDN(b?8EMPXdH_N|y}!BAp4U-U6cv0oL#NvPaHsLlli%*!xon|?n62LR_S}olfNVwU z3$wtF{#XXWq~W^0MkIjGT`=)GG}s{_eTGHFh5%~CW(*?MS9DU6;O1dc>T-EaHmG|h zm?VUAu)){z5FpieN@z!b$L4A6`aTZ`44zW-XC0Q+%sOlv0&7^ix{;dJ|1B;k;h@!; zgEWtY#nTAI(TY*henrA4iy|s{6{`%8vF&v3qNg@1_)jlOX=TkF1hY%5J3+hpQ>bGnuW~j z2{F{k>W)YDQj=$Sjqt5U$2*uwLREOw`~I&GpQ4>yTe(sPDhFCWMiJqCPQcvobtb&# zMQ*%F>h1ka%mnTW%&zn$2jM(Yz6z`gV{Ip`%lxao!0QrO9F?l=`JcXb2pQ}+v$}(T z?Br()%*K07O;S@B3eieyKFEI<-nyDyqc*05Z2bIg(W3eJoxL^}AO9?}E7hN9xUsM;#mn`&@=F z=aLox6W9^HOxN8gy_87*oQE#N6i?@p_=%<^`(z~wsgg(Z+&ro#K!d*kuH^RQ82TE< z1vylJFy>`VT3t?rlBE^UTio<`h9ZRLu($yw2R`w|)QcFnc06FnSpx%Eh zGO3--D)SkLsTPUE4S^z96toT?U+~`JFRTOTXA-O7L06vy26e|YG;s`6La$^u8?c13 zjzNzVrM!fR#=UQ0DU>J?%Z0Q!mivhKm%PGE*Y?`3H6>l2xrOu4xurQdP^Gr7vY`=E z>Dg5ey8&M!;ZB{pTh8e8GOGz?Rd-!DHbol6UfsNDGD zk7Z6#@9x;B*&9;mf-Mk*z>8GqX*gRivf_>^Qu~xyxyzl%OGRfTO5B90DK?TO&ihsW zCWe6hC5{Cu-GMJY{pEYgZ-f~dU@_j;*#p?hA#xR@BR3Oz(ebVvFWJ75-s4@o z8F7?yr@Np&`qaTq@>?8XY5q%syYkM?la2PJDxb{(3hZ}m% z`nx_K?T1Z2hNYRBxIApV2^^#0S%V2*UO76}6~{Y`1FaV{qkoL)p~eYSjr$gQY;L?1 zJ6|UsS?x9Z9)8j47kZty-N6!4iFwBo>13F}Xx=LBVl@;tMFlIZ#r 
z4~8wpv$9)WowwLt-_Z705wQX%FI9tD+<<9(uhLUhQUU=&ruQ{9UXHAq&T9He2El!! zdA1N}6O?fiJ8_iwN0~Gf6)|tQOz!Ncw5$i1#i*kdsrF)tltFIzt%1+xm06@eAHX>P zlOrwzz)i>G4yxSU_YTD0(Tj?Pu>`V_(Em+LI*d^Ug_N~K*|ufiOki(=|KaAY7uZ>k z-qR$E48%=nW9ZB^z%9}#BQHcW@3McEiIt^QNtY(fk=e2xlv&8*$eRiFF|yQAf-Av=s?6X1{4;7?xZdHUTpz0Y-f$V+4taB4 zw=+yCW2oeiK=1P8($T%ZAAukd-3^4CB9uT27S$2jvlbNGy)s<-WzYc?U24~~f&_8^ z1p?y}sy2Y$lgn^ZNC{?e0zegwW165iJL2(KVDkqE3=^?myFxF!N*~(ksgDW#%D6{+$h%ho zW{ZurQu{wy;CCziBvI00L5-Aqhr}34v;e(!3eTItdqeS(sh0UMxSXb$YzQ7#5DwGN zIv!8C`J!2wGj++l&lwuR{VrAe{S-HeW+za}}AH~P|6 zGNf?gL5j^2W1s_^&O8`}WyFyk zNP-Wcm(_Nt%7X@5RW#RINdlq#GEOUFZ2l-AZgkszJmVC5cDb9SSZZxD0sKcPSI@eq zdf31@1hr&`%4&5kEO@BFg3kHYH6by6{xh+E2bsQU;NdqfF2US);SvrL?wRiVNkx9E z*L=Q#APvIxZywI$c7wco2<_VHXzFS0SqLmO2}(>#g3?@bxl7LUh&Po;E=rEpvKFlD zVk;1P5-xo(t7rDx+6A4h{vO-Q5`)bBvVf3LU!URLJ^Tt~0e^aA4r&b$vrAVV%YAUS zC8fd)!_4c5{z{(%Hciex1Y)avyZ z|1$zFYJ9as=61$nM!bBer~lOktyL(->}kFJ(*$M!`)H(CSS$`k;ZhQ!az-?sO=Xxt zd4ze9H1rT8DTt)`06Fc$EVuqcm3@!KAZ@1Bb@&&pE3c8+JZNZO>9Z;VcX`rbtZBUQ z9GFQFQVay?+Cgf}$3}m2?G-Orndkp(B4s|%K71;`g<{o+Kf0adu%qq{a{7Onn1tbS zbjZP2aTpQNf4hWMwkNF>DilCZJjf=T51*=}1u#*r1?(9egYdn~<~aH(BoZl8#RR$C zs%{qzfoEt&=sNbuhPVB$feQ)-4VD9Ue}Vcn)hxJMj8#&-+tq@TPVot7?GtByDd&Tc z>FoDUFq+#kp17{8N~lU@ZYb`7lI5O%7}*J_2lrG1 z!|v^m4%sfSaK6oG_LHB6Yu=9r(9XHLZM8%&jEW18iaWGo>J@ZZimP4J$J|219CU{* zJMXXHUf;2O16N(L&qzAhel3~%B+a;+i&X37Vr`aS@eJdxpWzE=K4p6)iRoM{Ae@W3 zis1j@9v^}DbcJvDCbWJneBCK)1XE#(+Qa*6ElHIdw(tq=cakKk%S;TcRZHbX1OR!^qZ4tXJ_2 zqtrOOtuTr6{X(>qn#eb?enxqU7i+&P%PnYz3Ki@UiX|ja@Ku10mTKo*0qhsa2p9ik zdg(uBi9z7Ze*)LU7pl{HO{5Zwh>8r;pWGeWTb6m~W#Iw1W(%oBl4tz$4nvFe5h{%G z&z#-Ur>SOKG&?n@4A^HO_96UYv;{1tmDaY>`^J3XSXQTM$IIHvrI-E~f+)))95R#s zAS47P|CO(ZA)5s3q$(QG#Fz2_;488vWZ#wKp# z#GKW2|8z)CYjevTHS}uJO?O!FNK@<>Go4p{)qTo*NtBF0(~>J`=Jk0W^xaUobu0dZ z<%`cfzzBBTh@=d~FM~Nk$F$DA2#>X>ocp+UcVy#t)VDE{j$3~IH3qEQ9IiP-X#qB& zwi$2AYIG4qfn*YMUQ=N{kH*V>*;K6&XU0#`m!}0cHWXmj4z}cu3ne*6+~@Cwg|KE= zm+G4NFbxUhUY(s{$}+$fb5V~iE*K|cO0m`qC$i8ReH zcJ##WG6q-sh05z=HPn$Fgu3;#C!xL^9nwX 
zYPW$1O*(0WAX4du@JC8-p}Uwu)e7q+w61ko>G(#N^qIYd-wo2bvT&8Ah${f2p*;Ra z+iPJs73`J%LtZ&zQU&(n@YSIr!}63A!W+V7Z9^f)-|PZz+${8NN2s)Adq2ZREV}T= z=Y8YkVV07BY}NQ2(icuO=ce2#9+$ZBuM+N-Jl4g3uop+}PR?2^FiwE%bwecGfkAD< zlc&k{w`qt{%ZMp|76eDfarq2WO~LpOTf8a!cuP}M5+F3*37gv+0_&LX_bPNL2taw_ zFeci!5C|fya|$rn@(Ik8=$afk{omJX=Y8lD9x!+8%P%U&5QfMuVkPUI>MD&jp>rUPtaUjO`3;^6`UmWjwA|s*GD3aJ5*X@Up!tE6M`8#KMYQ}K#=a{hTOAK9rHxHfbV?iK>e&o#=%a0X~oh+YgoW&$dheLVN(93P!Dwz1Mh zIodI>2(0$!+4{NGV)yUeMh+S-r(&Y@Frf|*I2tE=W8(%PI1Q`+^$hSGAHM>L0AvfL znH+#(x0!ewFWmy)Z-CSOs_R-W;Ps4VOvGG-Cif5XgK;n}3u6?L4SHBN)T-(*47mT2 z{&47~lrzNePNxJd{{;DwS@$hP-n4;|!yo$u$9xT!)5PGI+}!0{@v?9~NcQvTCsPl&@p-a6vCnFtFUI9{xo@I2iU3CWQs zA5&}z`R7<=s!qlpsSvDBxr*}gG`*HBLgdiFh9cf~-mYWlXOI_`IL%o@GdE5vGyG5b z40PzJ->ES54N|nyz>acnRlkT961oRfjjAbh@`WBI(_9b0Z23wek|s9fT7E9EC7cCs z+w1!67RK!d)k9fM>qTz~{*J(&wQrvFfJ|jvJrW_V(JO9vDHfZip)v27x=sq{4Y7r*7P{BvKRSg<3(eXhrKr2aw>}HQHPhKGg1>;rDuz? z?NtKbaE= zpRgKSd;HTFeK0Y4=v|_NXAhykdqvrcnoyIDuXQLUjNZttcSiA6`IaTG^=Y|SML2n_ zQk97Gj%A?bEyqT1!!p8X)NE|u4!7$*mP2GR?yDg$##hYA#jdkKTz$C`Wq)qkf28qU z;Yiz(WEQPpY?$5Q`aYSzB*n|T)r$+O4j5K=!X8&Qtb-#?aq23NfiITb> z(SOt3v8#PQc^iH{UHyiM_K_wO3J}DE&m4U(HEqV?gqxh&Bh9e?S>=Rat3&Mz%pGIj zBzCqZ$Vt?#v8P*SfJmBiQv1nfR)AQFMZg4 z6xwAV9JeoV5#9qG!cL~JA_zh;trysc&BAt%w`HL+jd|MaOQi!SFDG=GiI)d4Rrhe9*cTpu+mZ~WBM>@;b07Pe}XFd$)_t^Qky5P z%CdU%hF7*%1jWk+j;D$twb5^btj$p9XoolJ!}{wG&-&d=2t+IGCpV0MX|e<43S9t} z_tU%|EQ)}^-;$LP)8beW3dY>RJ%ua>2e*mXHw{JwpMfChU;(C}uF~{TYfu`2M)AMV zkj}RbCNir&k_DJL4T{i*`)gzVxuGO~TiKv*wn4PV3(Kf!a%&p~w>H!nwGaEv8iG9Vf{N6>P=m4OhUEO1Ecr+kQ*}!i+!llDs}RX&)ic2k6~ZjF&bDWn&5ASa_Z*Y zZQ)UUjS?riLp)9~&Mg4`o~}1x7`@y0gN(uY!$C$+OPB*;zAVE$x$*phYhFNPit)?} zw$)aK^qK2a>7sqG2sh7OY>o+?vy1EzzdGPIM}2r!rxG@^b;kc=ZXVKnrkn3MbM{l3 z1MqnD09d0a!Y67EI7ix}FgwiQxlO=y!N<(%MKYEpuw*>VWmhzF^m#?29>IeLfy8G` z&MU}vpo_oab3qRIv9+kN_Vi>(+QiQm?Bby>ciszd6{--F8I^8?^59C_#P*{a6c9b?C)4J9cowFZbQR0 z&>cz*YQ1_x7?1*54)4GR^5|ikhutkDss#yUO4HP-!w;03!n5bIXGP1&GBv;sR5Db` zE2;2mqAn5cu7sLCbYAk!{er$u90(4#zdkJ2-mp?l4MfoG3xV-l?vI 
z*jSY{IwjMXVP7`7q_=Ez7oKRk3@J;9;aF%UycEH$ZfnHgD&--xHH2kyKu_*`ne-+k-+D+H z3z$0z@9?do%bfNJb8H&1!`bP5=Dls4hWg;aWkxF4B!@ah zfu+~R0~NHUHSy7WIsvcf50z+ z7_pSSdw8<~1>FWzl@CiyoQ?-seBuur-dE3IOgLsx=Sa@?vos3I#;@Fceok|!0Q+^G z27g}+F-hedI@J}`J*U8X2`>00=0E=vPzjz-Uhh``H>koYJS17&u?GTdv(A3-+6hFA zH^&5(>Q6VD1j98%BC0lDRCE=RZ`+3x($cHc{M~IpPMgzp&j0KKr}}tD^fRV4u6%ah zrL<_3oG3~$-^#Aot*&fh@2!A(U2vsT5hJ(K1HeU)u~HCARAB~kvrd0_J#$Ew9<&eV zT3q{00G}T52kqorRMIH7n>jnao~#9)zHnWV=2@Q27Tga3Xw4r zGNRQyKfIv8Jkk#~5=7MzkG2@(2b{O|SjWc*TDm9z)2G-{oRIz9(&hm?SO72}0#WMQ zTFoSQN<5c$0QR-tW;@Mua&?3RaIWlD{S%j%@{|S!Q1um{I|Fh)__czXGfmgZ>&*$D zDn5v|VL}c%cg2Eufzc}n4OWVCCFt2W|2^q z3~SmZjZ_@!=3l=OkJwzrs zn&K>sis=u-u4-w+IPBSb!_={7}a@OW@oUv)v7%W!dKW$nGr z_&hVLuvVF5;QNNl&AG)$%mHk!h<6q0oeY+QF{7n9j*4Yb+mHm&uIc`}tq(fT3JYxl zjT^{qiD@u3AVTY^e~y2sR5ZZ^`D`&cz|n0R*?}4YUfk?c*1Pc$=88bMHs$maFcZ2V z<70_o&$rVO%ix^PzI9nDA`yu<3GIL^=k=($70Uw2vB0S-T!A9TY1g!Kud`z_XgRb^ z02r?$`Ms_$GTSSNnQoj>R>a+OMRKRrco?|>Tu`4F^Lnmy2E=BUP|xT>ScaCXT|wY#Uy$|-?SHO@d#rt5gzRshAdp;u`=8g&j=T!ItnLnSrMtJDy!4sOpvn; zDse;Ac72i;rB4V3AW6Wy;lMzAQDhlRd1M(bi&0bIluh-wXFV`63cN zIPwp+)2_)wJ4V)*yk9qV#Y7wI-8eJ7EQijq*!2)&aEM9;jKj{7C(|`My*sAUcI0Hs zXknfeEoIXzyBchAJd&MIfo(3s89M4uhlUOG4kcC|jjX3y99CD+qHntBxJX-bPkcBX z8?$OnYbdF$w`ibqERAI1bPWtS$Uoyyt=O zI`NA^5{dElFH$Gr_rf+kyNAklJU^tUk8)3k$SBcme^TR9?6_@)yFfNS?G8TiO~FdF zY$9W#+%Q)x-gj1GHU?V`;YF$$5fBZhDLwaW%JQfze9pK@jXnF|btG+V!MLEw#(am?3oJ^d?}oOnpKdy7tzI6nR)?PR4~@kU@C!?rAq2uI$Q_zp z)fowRfxN^9oVpI_Jye%ik8vI*!Ny_}6*G60Chl(5QjwQfQX#F^1RApbH-g{i>vFd6 zML>WakUg*?{J>ym=+Eh)x`KF1xb!{t>Y zD{(nb@+g;cm}~uW(1&ZS4Ln*v+yIH}UQbc&G3UOj$CrV3LE|IpdN9%c!jC41??*SxA^Kaj_~szenu zvgLmPs!gNi3*|%m&C*zx%nzHGmE19dMbNCl5bqL;|5UUcNHZNY$T=Ca1QH_K5%C zOA*9z+`96#=^a~4!EXb4AD$ifn|?LDT*Gr}Jw0#kZ^!pjp$1a9OBv^Ol=IBDXtHLs zd&5?mnBlMtCtGih_UHyVB}ep1yZq2}S`5ZFEv6qdZY~&A4z9s*MwI z@<=23^LLHY(E!4m+Oj0PK^v4IdnoYIgj|U+5{;v#<|CQ9IT>G?M}o7TgZJzdsgK9g zVLj7!pG+z+;4VWE&aXN`GPshu)Vxy;5z==b@|s*k=dE)j+C1mg8E!8Z-;Si=h^ 
zF=H5QH|b}GejCEvkbhocslb7xTToUVU-OS|X3M~TBS&V%JV+h}5+r_XLP7N~;P4btsZnN`jvC-U;}PN}&i$ zPv6@LZjOX9c)q~#hlh51_P!ALKPgz$RbiD~rqb3d^KNB_3w^Kzo?XgRHrQEr751g);ymhcNkDv6dbgYf60 zu?r0)WvMy>NNL1%mcAP8@*4Fe{SZS}!ZnVI&E;{R1lH$ zJZNt@b_RaJ5YG6qnboj!Z1{-V1>9jUvzf3iDUF6{D2dQBJFp*=Dm_JpH!@y~2Z7TQb*kddX63FEz>gqbrS0SeE%2@zqpDYF@GrMmTBrYa5gZIUfO z{BucH&?L90&T8IXBZg6S{3l;J`Z!yX{2FIR7(U=N+zHJ*aIa@6I{l9A_n9r~!Ni z!Jb1XcCey9K{2fo6y5^B^qrwzJ=}{*dNA+`x8F^pBnYNg;Kq5KM72Z}-6wl|tRASxwd#Q4MT&^%j8*mVS=`Z;q*~vJ>qH@r* zkD*-%oYPyt*UoncOK1s*?YOov#D2HvL!ArAi^>Ydk(!0}2=<{L7TP$Sg56%Vcir~D zEJ0JZz(v=bVo%Efk}Pc*W8d3(-IvpzmbR~B-cH^*X+|GT-bF`Z=j8@n%SE7b+^@LT zPo*8rUzN$OkT_>HkVp5g88v1?8PlKuoC8h+U1=SUcT-rgVTL9kv4Qr9%tbV4ssz14 z3x=#mm(;>y8!iZwe?Ym+E6+CdYsAOuu6-8S(>rd0;0s4>tx44mpqNGNPQCLDSo-JQ z?e!SO5w`<45}xj7yHAl5J|zQ5icQuNDBtc{Pv)!@k9h$WD#nrJ)G~!=jTxGGP8${s958=jSw!_?gKS^ zP0VFn<4x;TS1kaxI$-D;Fx%x^02KzsT%g2JgN4~97r{(~=5W!p%5K43(>VV;&B8O! z*arJ_30wyehMlp``a6QraTgb3&^#$lar1KTvb=hFYg*l$gbBL~uw~(Yt6WLF!Iy<} zv`P`8ct56i;jkrjmA=eTn6wZSoP4GAI^}un(W}=SWtc#F*FXW}bbH0o4E_a~d)sqq zehZ=jjex*M`e+>y_m9nOI+*b0FZc!JxytYcO}tatWJ}0-8Q1wlVZ1!Aw$Rq*gO%so z*Acz8$VTyo?8`lm3ezdu!Sh1vJEBW%c*lkSEMb&}33So>lr;&V*)v4UNI`5XkQZvX zknRtj9s@*cLhxKMnwmMm0sNO#(7km@(hMz=GMA#AXRnStEBdbe2R@^`WFAWPOAYPO zy~Dn;@iG^d5(0$X5h3SR?Fk5pWl_~hPA4tNs_P5Z$4G^_uNlCr8F+_j?H7san^X6JVA7}ac6U-E#>m#dqZ#vk)oPK!nl zR!bXc=P91rlOa!q(1xAn*14U)UpB-faNkut3{ovOpir@O@(x`L^otZEG|0Zw6nxsg zv6Sf`vY$|wwdJls?efn72&gd;sfdZ|)ISXlke=M6dI@D`OMo1Oz9aAc0Q)bsG4HIY z^SUa@YGgiLM`KS5!SYj$|G{iDgYn1Ds_9^Kr<0o9SWd)|pQTPK%~zX*;S@(jfzZi@ zc^7eFOJUqkm!=7#@efHf+?`bx&`94l<+_pDVcC~iD9fdkV@i%N4F9a^_<(-2rTHa!J|&}fAtFrBAZh-1lD_nOtD zi(glK&okYaVwckfq~X%jOZ7;fhj!oywr?m&MblUj6G+8q-r3A6Alrjq=y{+I<4pyk~VuC7th4w^v@U$qw% zCX=a*`c<@q+`CHM@|~wU!WksOcP1Q;mctHwo#=4scN51)Z}2fmnr7nWO03)s?!Vkl zIIDsoN$EuQI5V*|2IdGdx?gX_|M8op)`+_8qD9c|QkS+vS`*JVTKaZxN&7rp5T^#E zZ=-jWi+KaL3<@*(6Gw)=f=fsfE^~X#F796LQ4JbUUpv^?$6BS&z>t&QWGVq&;t18b z^33m+?NVd-JU@hO{^foZkYOsZMqp7w#TlXDZNHm^FJfvBquECRVKib^dtD| 
ze@Vapy1BUd=14t#TxXC2qFTAFhwJhseKZP&L#sW`?2e{P#%z9e2r_inDK{x1 zO_N0W)@}Itt2Arl(-blVBB~Gi?;aE45Wt67y*Fny)~_lB-GV7h$N2s@I!3}MXJS1D zh4KR6a#gUc!-2C^DJ$V6qBc0Qd2(k^PECSkDqH1GR1wh16tf$k|Clk&0|~C=_&m9Q zoRA+Icb2bsIGuC$*j2|PQ;Sci;<0;+ObXcgvqg83e|=Hpj^fN13b$5<_cH*rdrseS zu0@=CJA6O+B17)T`R5Yqe-_6VC59G_F zR|{3p&R5A9>w6Q29|@}%pSPh{)n2czn-GDB**&L&B73r^(mpJ8c_3ZyCw@BVVvX{x zhAlp(AsimdY%n!K_8i7WmRjNJkPX{nm*D6X^CUOi;RSECl1=L^`U&;j@1l}s{8<>9 zf6MOT!r(MiDXu#Cm={h(?s*=T91U;u9Lj)4vr3UBq_C_H*s9%D^llV*%)sOHq(X1c z$A=iL0|@eYJYpt?q{?j^4q5a%kz}2WfyKlES_4}KRb1Be9qMuIt)yaMJ5h%BW*p5b z2x_49Am{_%Y4b$|xt|3iE`{Wa>dh?u)h_gC{!I`_TbgiR~ToSaYQ5H_RffHrePg2*x-f5vN^Y$Bz%$vWx zM1_H*#9%ZxBsWTU-%&K58MwwBfxk__2%7fC7?PJik>%(4rf;x+e?SlS1wCPE;FCR( zkcCr(~&K(v?UY5lrOL?CQQf$LP>c=-=`>%O>z)U&S**lKDp&^g-xZVlT>@4 z6sjXUvbApwnT?VlIFdW7ds=RsDe%t(YC(Pvl=pC?mUlOcIT9Q2whT9zUeu^IW;^sL z{d&=*1~bw5E6PVcrJ+J21Zm(n)=9hi0#-``E*fHCCJ?uhUe&n@`>M~wXBwmN4PZ*( zx}AM$2JG!tm{kIqdpfY9q8Ye#X|qUQLnJNlaaMafHHVPs!c}O9?X~^H15>8x+Fnd$ zm|b9wkL%d}9ZTHR79^n64^{5qFPZY3Y0NQ@!!Mau)FI_t>;?s`$)!8rLN)~8nx6R{ z@nWli_uUQ|U?clej-b;`!Fq+_AukMp9&s?+0X42Y4rM_$eWGdC+xu68mIz!KaNv=f zgbC1XDY4{}uoXp>Ueew)1eZ_@Ty$@Y%&{rnW&E6imRwd@JZPU=4hX9EMfxlv5Jon8V2I4*3tzRhN28+Y^B`T61bT#U5y%(lHNK>*XK>+C>NB#dc zx2JYFdOOkfCUjl%JC!H6_EfD$!8&fW#eFOn4L#Dlfd1EUx&-PR2n2f<`l`2~Oz!Z6me4To&Q5Yh9P1?{B9X>AN#_~8-CqF=ib zW4aHBl+e2!(?U>OjH_Ch)s^p7Ba%vr|pI(yTEqN5rnlY^{ zyLWnSmf`O72lF{G5Crezns|wg=mBVuMsT3}t*q7T=8O)$!14YO=2t`cPtUSd!DMKN zJSvJ&FB=;2K%Q*!e#Sj{BvxN5Pt*4UYms79?Z1~EfA%02f&pG?zc?FpteGTzSzqX7 zqy8|Lo|v5>m)R{m&!ipVP0@Ft$!zx-^iyS=u&;)Cb7K1+q^J992u*!~p`E1xt&RbY3dkv2lzgrXW_UFm>|M(o}ZE!pG zb<&Rg?VK(%y$lrUHDw}pH~^+CZ8{}z#Nn~fd+|gy?N_vQbGjRG-9XxuW2MF_8Yyf_ z(G?VTdR*|aw1RSE689;}17#X}+%P=o$)Wm+T5Gk^S+l060gvWbU%^a0U0OS@1>iCp zm+a$2b)NOB5ZDZc4W20oooi`dell@+3C?0p4*~&zkWMXK*8~h!PCzWp*L1Z zBmi&FlEttjD!(yRl_6k(`-y@(PDRN3FDORN4FkJ$8>uDK@Bt21AP|6X9Jp}23>7wT zeioSE?7o4(pAn{dw**~Q@@faGaBcqRLe{>S0PtOkX>!m7^H9Xb7QV^aGZ?^mtDRh~ zHkw;+I&B)81ym9=ThI)!!!{`KQX~?taC~ACT7F|9B7HlH@dTF(MzCyFb<=O3`0NxQ 
zyzsBkb42kA@I@gqv#e;7YC*5>qIE+sJH+^*H@WDy83=5V-HoPR8^j*3LCdbGO2Dx2 zwI}sOkE)G_`?o%3NIM>S*uX#bi7eMl`Bk!Mk*^CPB=O9^l(v|d4>xz4}l%i$a*(0G+R z$3j1Vhf^{z!@okGEj5m9fofD8G0De+sv4a(UOnGtc0oVHcemXFLU(R@T3U@j2rC3! zHvcUEc9VboFUbslrIM0)g!<;yG)6W^#q7RIY$uRZ?avvkb%mqhF!vSWHQ0|^jA8gH zVsn%{%{9#rd*{?KChRGf18pO3UT0E$Yez)89gwc{mdH&cZB+^$-zS-qaw1mn?j(y+ zcCS?Jq0`Rrs36*DkSOg;l?a8vR5pwiy*bnc7P1Q*@dj z!9xARwYz$zVlF6Wu;ffs_#;p%+E!GB>iek)il;9v$lK-&dXYw*w(U=@P?0YdCx=&Z zeTDPgnMb{_Dk;3^-RcUGB!F1i8DiifU5IBZCFnz%uAmwcNiw-1-Qt&glxF$2#& zC3nY14oWwYmv1#ahN}Kwh@+!i4lf-&DtubTCI00FA-MtSGJkiiy}m8$c(#lUs~N8* zoN&}tcY<3wBI8x9367pW<%cszU|voFLxyJa9OLS2OX94>G^C+^g*CmN1}AoLT05xI zo+oVi*`qaM{?rPR$|ux3n9ThzDS3Jrh==UKs3YjN=`9x9QgTeDFDJiJyDZ?#FPaZ- zAvXtNr_@n^_9)G4 zrrid!)0uZ#Q8j!I&yT)PKWf#dDnIM7SH*X-tZ_cg2Z!%fk++40t8dGCi`YGnS?1y` z`^1qem-1;F>E!(0O&upt3u5WWHHsr3y28TBOg5)Tfr*fe=8!vgzNZ)>C)9y468`B7 z`?IDUKCCSRJC^eLb|q=2xRjsigY_E>BYP^N$x&fP|74Hukdr0R!mu{C6=nej)>| z*#DJGHj|>sBPMBPpkhor628u^>z-o z1TVr2sw%CTsdM1TsIe!;6aiR2lk#KP19{4)qzILw`X%2&v{1TfzWNnrJxT0+uDMzV8KNQWPf1-zX z$Yea!uyJQ0gh?e@qT-k=zt6%!$P&m7II1HFSmj$LG`N3O;gF*o{6%uA&{-t?1NWS9 zB{#xpEGXR;=d^prIre?lsqAB{&P8HOQE~YhS8@{>)avi)Lktd1Ncuf3 zIP1fVfR}h6@ztl3x{*r#*>;!9XwUN8KNYpx<8i+?YV-qQHgwBR_a+r*0MpdU4RhxG z;`B=;`Yp*DDMJcfSe4whd3L8>h>@%VVdg>$o%D`?J_ZiMc9u`Rc`vlArf)~O`7@Hx zx6Dci>+jRcw&ob34f~)YyVVjA&9t3PC&Xohaj;v=JV52{2KiGjYA!@_bVWQQtp75I z5Ki0@za>e&rW_X zG-b*uKZ43_gx)aJxQ3pZ66sTI)zg)uhZmY#9`rVTmN|nFBm&y9&QFa>id;(B60K@s z0Rggsn>7N3azTm$#V;Opl4TWyYGwvcZ${Ks%>#6E7+GSRxlFs!MXp_z(4tE$sy%dr zV!rnoS9ZsLO3T?uh^ac8BuC%=Q=Kx`|Lxf-)U+)9Sj9r9jcumt)o_J38{LduEMbV7 zt^F!FvrU}C;*=P#MARCZ>uca)vkZBdiodz=2Rb!UP&iCcHyZThmPv+1X_$S83$(;+ z^TVe-K1X&LFLgdlfCWC!C68(S^$Ps3Hr?1BF%ggXYb@N?n(}*28Qz3-u7gUIzQhGl zOZql)US?JwO?5XWiS>}59;~OgC-^M>ZhdUrFr76&oE7-7nZu61?yW9-l=mn*lUO}> z!xL)FWTqR(ZZ$y%;zJKln#Q1@V%JPYge)SyH?bcH?0{k5xwQh=!S)Bh_#@Ehhm=+! 
z?`-I5Tjb=lfS@zp*e3tt=U%mLz;0~eF7eeS$iE=ixetj7@z*W ztxdk*=%9rhQn^}*!PuDM1`a8mno&CAPOOZ01knJXyfn6HBX?8=u*_iyF1FyN&6iHX zlK7+4odSGo#Zif|lF^xr0!@-DMBw{Ncqn%2domtheYGHWGIL(HqKIG;Q}LQwm$%&3VZ*JsXn#f3DO!>i7B zy&LRR8$MyhP}iC+Ul-lt9ajWjU&+tLB?{w~Id)|<4u&lewPP;Lol+Dm)r9*~$OylC zlMjVTs#V)r~bw z*G;(*o!Mf)TXIU4`?R^Zu#R?YtHK%_&wXUo8=F}4renpbR8Hic!q8E&D?XEP6l{09 zox`Q|BJ?(kD_M8gJ~z9SSwKLzrP0uWDTA;Edm_F}h!=vuryql^n>Q+bKu`{7SR`$s z{+2B*D#7GzY_yl~k0Qwx(`MWDwp03WHf9<@E;WclyBB@^4TZ$tg0Arg*DKOnJ7i zNOOy`BQPvUyG;lxCpLY{O!6juSzME$+w92;QipRlz7)!`v&C&!9q*bM`G59LwCRg~ zMlPy|BzC=tJ*#rJ)!2xeiSD>*>R-x=mR}RV$(rR>w1KhRR7wW2$%hL;Sz|H^g)b=y=oi zJg{`Rb2b--?16UxYo<3F#<3FMM{_BeCV~l?>TW2&t;Jy;@L74QrS!gjrrDdo>QN?w zxX3}M;c6C{c>CHu*+sL6#quOR*h>@uC$YZZqg2cj1_-HMg#sT~%ssCd9-;Z#cEUrZ z!wnBGoM;y*vIM!KtSsGzuyfJ=aaMB!aT8A{-qylgHaK=@J4OiaG>e;gmaaPTYn3pr!r#n58Le#tA0=jS;JBS{p*!Q}7k31v#4 z;cEbdvjb^d?AkK~v|VEkFMn{E;t!XNf6@iQlp3f<8IdS3aVF4ZV%5pY+8b2*l_vQ| z&sGQYlS4}!S!1le{V@;y%8qzwB!;3C=D``hv{u?6=mXP&Kl~;Y~pknv}cywu`Ut-=r3QYO;L-VTmWtJX^IVnA9_3+LgpuB zMp4eU`Iddms(l3Nlj+mvN04mLHc1G&Rt4{9J5(F~qmV5Huq8n1o!M}DyNM5*`=AY)3TfM~a%=;O@nslMpk;R^Z?nL8mCZTH%%qGrc<_!j#DTHEg^~<@Dt14!7DmK8*DvhAA(fk z$}6qrRiNjO0A_hUL7BQSf&GS% zHvP#+T$l#)zqoT~Fm1P`ndf5^OQJLernfM5uf-yXu5l}}B)?EZ2g z6Nz3omRGe|NJ@y17oB694)jPBT>}0XYE>0j@I<=MOs@t+3)k-@VZ(|tDuMf+7sL`; z{i`ich#C|P8aTf?{zZ+H+?kDOD)@1*D6LvsN_8(EPjy z^SAGbY@s0S@R{0Cd?{(zPHQn%W`SW#m2(wafDCbMz*}EGPu&N8@PJv%Va+$83T!5T*zoTf!)1*3m)|k7!DCQ-ok^-X++Eu-s_@t z#zUJ-Dw&u3jmo~ILOYhK+JqV1{0)hGLVbc4`$3^rKaY5owRHqChM`{2e}=>R(viu5 zShZWJ0j6Z%+-N63P$ynhr?Tzy`1zPpJ#MIZ?rmSxaI;=hnlw&#td3=TCl}ey&YLi< zp1aD1(vZEZ`A)*e>Qj*S$Kr;2}g=2jjK0aYe8!9BQyOzg6i4Gi)GSu(n=;~bJ;r^tHIH6J@xM^`pEe6^T%VjqUp_?V?+`QCD zkJbjfmnBWfL`a(|a5GnpBbl;zz(5CJfn->-F#naVH(>1SN`_c=2@ZdM++u?aXxLsTcQsAcjj zxKsSU4hw-b!VkWUvE%a+F)EXw9dn+LoXN8g0U^NqU@;uBmNDr6bc3rD=&3#$Ru-k* z^8A*UetP$kG!d20en!kdp-*mQ>~^k@hfDC_lUbZ7NbTheghxQk9#%&_z8mOMUYx!H zaU!+~mw$OVZGJ%?LfYiBWj-Ouy<$;I;sMiBYu(JE_17uZ*|a)--DAC&x?(*@6fr9k 
z)2jKIbdsmd(3NxTs)6`NE1<}_V2S=(W~*EL^f?!UL=C<%ZeTFm#5F2levvQIvg zT}e2E{lF2aQt4W2f^BJJ+%9`R6b?*7(C!Pearwp@3pFL9W!`O^BhEXDHlmEtFeSqo zJFB7^qv*H{B(=~Zi5@q^q*8kelyIP~u3T4-O8of`NJPWG2AzN*h^=-p{r-{Xb)!Z} zw+ddI%M8#^V>xrrK((dKtjsaAo7nQENaaF2;)7NR5R#oJg|xy9ia%%NN29C|(|r9) z^pEjsWgmXzNd_4ynhAqL(X2VQ!TO#nfdV!V8#=%W5J<>Z?kZYkY75QT2j!Ix5++nY z8QRxgxX1vZ-?{tr*l^NX{+vJ7bzL486r8VLgF`1O_poliwh4~(UCCC0I7C1Sw%K>$ zgMHNv*N#yA9{HYj3;4eWfSdLbY8~G-O~+uOU~;*DRA9^cd35kboY>$?9}* zE6%eK1jk=!Goq$`6rr`>lMqM|g(Oo#4qgNwt-MLTLxk>h44)1E8qcl1m!eu8dF%{O zQ;C{#<{X7xF4vMV-gs{8(zUSQEn6-Tp=WVw3<<3tXNA~PQZ>K{!Ejo8e-E|yp`GWb z!Tal))1CaRt%6pAR6+}5^d-<*QeMYwc_^;#cwf{v5#R^co_DkPAw)43ABE==pBq*2 z;E?JjnRDmw-#sJkCRT?9;R%T~MH{^v=dqSY??7-BACQxFCA>b+Q6>QpiUWLhh(eXU|08mx2i?z-(*hWyrUimwv zv_Z_Nx>!YOqe?-Q<+P5Sb|^^y>G?KKUfxo^#t){TMkE(^^(7FblcBkScG|vAZInLm zKm;E&_~JH|hcaOaSW3wQ1%f1;`YWA^lP}Rz@75^pi{)SoY89VwfElsx@`rX@V8u_4 zyTe-5UQki)Hnec-jfD@Y3;CBE!LmPQv{>+5Eyn#no#LUNkKjwc8iv5$=mlG%k(_Wi zTl0OX=^UxXz8L{)^!UXZE_mxdH(oFb(ygN?7%Yt5WIxH&@K8?eNhJu9FiCdBPsR_u zwnya7Vr>%EEr(yTrKs+4;aM`=`jA%aj?nOu^}djZ^fS}Bef>@=pE2%iW1s$pCuD9G zg`a`q=J7k$EZ7mdkuT!Y%XXL zsvrlRMD}~jEz{w9U&T(aMT+XWFJ8?F#oH=FnfhVr{f{q@NP~_r*G1iDdJDvWSGupU z)78_0QwjWS=brT_6MU!m*rmD#WT|ycDvYv>fT~#Lf6768kv^jb#c6&sUYNKe6%jiZesdALiJF^swz|pBR%jbo<~Jh7FSYg;*q+mo z${kJEfcUQ0W!Ty)ayR5*Q-BK-N4kj=M)qx^>#QSdDL~eb-f`%BX`psnVYWd8Frh>I z#Rl_=ifBR;hx_Syear)q^qW5mmJ)9kH}Y1gmUX8u6ZaW`Q8IPYdv^*Y$;1w4N?NwQ z+1s7j#0lI73@S8{Y`+-RsO^C?4&;MOHk5?o z+9x%|#Yaw#Caa=WyXn{<|mNE}Pg664_$n=V}oe}hvW`WHXgw);X^@+SRCrp{Nf<=ybwEPwhshVi?^tWv7EJNF;XCPNqt?3s1HLUQ za9ZGWL0|#oL;CE#-o7|HLy0aE(mpQDA>`^2QdCo!B*dG#r*fu<3yCF%FmAt)Z`3iC}sH}1;+=Wwrd}uG-*#J z=UN>hppsJ~m>o%`T*U&6#&;8K%Mm87a2IZQ5>e@B$Y+;Vn+8QgZ>fV4VSLlL!&IP??1 z)oI__<8=C4AG%< zgC{Lb>v6cv+*E_o8#HLCGzGa(9n#5XVvM-!kOTH;*iwhtM6e00yVW~V4Lf|a>7B&z zmm`E~zA??b#d6LPCY-q8)*x`vCnL1JR`HvoEI^-=K;E5y?(3GP^noEq=~V?Wn`ubN zrgzCBbVSQg>q`tmH@tmOrtxfr3-x&tycGW-#%}sf!TK;!_HDtE#{~AuoxDw-I8c)k 
zxEVRnwksW$xE@|{+c<;2!ab+sC7E3v%pZ9AMdrbVdA=Hr6JP4o7HZ~AB)_=CGRPAI>zG%+48!He$r~{t>HgVHZD)ppHCC}w zIZZc5^WM^}8c9(yG^NDnU2+tj?n*_5%1Ap*ysT>0>t*$ND&^P&sXLsqQ;Uj~&O56@ zs^WsdYhP-EWcw9PEp-=99=RCIcRJWpV!Dr#m8@R^sHjYR5_&*W3HvuUALD54{>L5J zqz#$!r2hLD;eor0nT=6YYiE#4x$ELAAed)E3PC z!#nG+HWT?#)~M6Qd1a+`Twu$NjE4?@)Wv~ozBq28i~67E*Ks9q@!3;^SR+{;4t4>& zHWEZUFkZz_)6g~dgF`g+CzD(Q;gbpWJf;<${xvAMH0#WCMp~Y&E^h@ctS0&q?tqAS zlNE`b;)QN#w%UdgF$FV^n2ib2w_AQ-<%K)E@H8q?nYk&#La@=?tc#ISJvjR{)DrhO zQRk1APxD5~r5a#pY*49I&yeS~aibCPmpjGj+#)JsZf>+sa}|9*Q(;ZMf&p=fNQ*0* zHl_3gnOWTqxJag%pNoR5k`__H18xu>d4LcTH>IGVRvUK|? z=EVB!rd(IyDrMsPML^I`*2x0^Bd&~BvG=2_5A7SWnHXPwB9M?3(jtpsJ(BO*2(q@m z?yITvF*ZQO1ahG5EeYq%9`uv>(aAB-{=YuW+{De#6jOa6Pkfdq&*00A{uu|l5LQ$K zJ3=ACDID^bs5iI_cO^9m7)=C3lr2?$fuR7?E2!H_>jp#O3%}4if~5g4dxC?WIpo3p z!A61;3+udsilqVgT8XRpQC9w0YeGIXRE3ndn&eEEfA?B?mMTVDyKPcdCh9KOiTVTt zG>qiCe;_^vY_)IOz}0>1p7N&%+07*ar2S7zn`3wF%+RBi>7lf?z(#6yOVa8i zFocJ5`?-hHq`(vo`n=>)l>YeR((WHhi`_(yApU%5o+m}Ym-tnfbQ(+sqD9Sq192JL z0O^{U<-4ew%D9~8j#dD|)Z90v!2^_yN=7#Txk~C#3zKLOF||`BJk07d%MKC6OY@M3 zF{qB|Q)-n(pW;FQRlDbKftCj7Kkcje4?x*2O zA50U=0#um1iADt7#s;Yf4^vfayEh%*Ji3Js zgVuhI2J?n^2Y>~TOficH=e+MW_?{irT)heB8es^7ym@JhQAfqn?o7Fu8GkZsJVQK`pY_ z#fMB+0#v&LL?KJ=tq=x6dov1f?Ho|LWhWm_^#nqnCENdAmEFh~-Kop~0ouI3U1reL zl)X8G@^aXhKolFs{D|b#+T|-|qnLC3*$=@+*0=uuTf0!JD1Cm_<-%KJ;z7m?OGy}3-w+$;mRM0@{c33hbAUta@*&1`bX`Fn;wNxCP-|0a&PE;F8 z=aAor0#v(Q^BqIp=-?ts3^kQaCC%jsL9Y&Ls2^XQmnKyYy zx+nn#tVJ0+A^riPV2MX&H*PR7grlcT%mzcsVJ88&GkXbq@`1oh2*7Y8Y+6Y6VCH89 zV=Gyr1JA%vT4tvf-pB1ueAb4&q=@3N1LUTc&YtxJ=R~>8Wl`9|rv=&>o!1<7wRkTL zjuWUQv3aKC_$L9I%qm33xJK5!h|aeV*bj?jmlaI1@}Dvw#k?Jj@igB!n#Q?xhq{7v z{7&oklp1E5PYmW9{ULxCwmB_Ig;7rwClKhWIFiTGYg*JUT=9i5mxj&x&5I{y(sb~a zcry9@^5j1(&jph>$V{ybW~xagu|Z0DJ;oS=So1Ip*9?kpwM!~va1W38?p9wB7p?Bc z7ovL1aU3zKR3oII4Ff`aP?RxgnAok~d-7B5$)q_E}dl6+T`YbAA{T@ZrM zL|h82`aj|0h&jhfWu96@wqU-@L-raj2?WiM&et-`r?h z2L{k;ZvscylU5Qu(UTuV9+4{YZL5&0vG95_N&(6mHT~>r$Vg`{k&PnecAKf8iu*2un9Eg^ZTQElJG=5jZf}LRe*NE 
zmFxqJ#EQFb!|X_|6|&wXziad+tHX_A45?Hpg?3cUZdx+|5p zycqdH$w6~#+*|@$Fcl-pNxP1IGrlsB*ef^}y*hx}eRKz7gC7w)J*@(BfciI!nC}0> zkoM|?3_Q!_D3xMzfEW|~DE5+*;^-Z#gCoyv9h|cTeW%Q-pb~uA+sWUKz(Nk4|Jgg} zpF^~$mJear+CHfyU2S(u?m=gR=LFK~6|_$OdQXkN+j`Df-*A`f(j^hSXwO4@?{N8z zyIH#UQP?`Gb0j6XU$7)?+mNkIbBYfKFe|_;#pD3hE3y_gN8orUoak8RHGM-rn?WDGpp< z&q4N13n@7}P^3_Q?XNq+WLt=$=J)y#sj1(Vzz#L*|q=+v?P*&D;6X z6j3*y6Kog2E6J!-FK?s!uF)D;o3EmdRy=~f34`#o=m^@MOgHkZ4d0W;U-2ZQD2DE4 z2d#&a*B-9iW$7u5hNTYZV9XHt@5i>rp`Ywwqt(Pwn^1n7V>{IfgA)H%bK*~RUv$2H0a7>=D`GV{3qMVjfJ_vi%`f=BA zlAZqp9)La}{HO>dRymHWEXx)r*$-fulw_LIz@^p`KBfD^BK>kB+0)~QI$0$4qpcmU z*y=#!42qjn3EV|Iim$Uleu#`c!$7?w7C7t#y`7H@=1{bvhxz8(Dvzuy+G$e9M>WMjAmYUH#fengK`|69h6}Si&FNIE}^(vjiaRiUVOP zP<_^QvfP@%XTxUB&V1?o9~Zpk&E6_ci)#W79JM}PDfS~!QY^O)jtLut;!$|Q5InEf ztUa*^@pe(Wd$_?ydcCf)yMixim2ax&n`}%vWvaZ6$y;_u8b-V5e71}IC5c?${cnju z!>EYp&b7VSKTZ-WVOk>=g?lu7;8ggIaM<-%tUj(Y1eUU-92*}+WBGhn+LEJV^Om2!=<&bSDqR}>$Ck-t zO`m7RcBC-bwlE#GWt)VY3;S)J($;5FAi;&KLUYWamGL8HLo5hr^f9^b_pm)~ha5D>%bIkChg1DdbpVjB{}yj{r- zJX{4pWrfY-yG|sFK=`eAI;r+S*0d=ECu=BAt@_5D26e$pCeNQPzanD&@=d6}sL%zO zXA@rGR2)BX?!+f0g6*d=p~zZLEe4iJ6WIGzz}dAK-eYaUJB=1YXH#)dM+*bBCl;Rp z1m`OOuQ|aJU$Yip&OOaY09@NgzbY7s13%3EeZ1V_*sE0WF7!|xLTPF;j8YmTC>7;A zR%0ph%mR@JrHFAO9jAX+w0>9CRa}I!n2US2;2HNhMjV&E&}NL4zWA!;U)ApZ;y*TG0{V#t9}sf z<;>tVZH%tu^xP$i^oooDbKMu{;lKozF-YR)TwP3k+Osm~6(6uSjcbdWoXqaBmcC=Z za|C%|;|s$QjBh^@l;T+c8rb5U*@kJfzdJ?ioBZNASl1f~N3h2e0AXlKE~U~IIReys zW4!mo6X1A;!~P0PqWwVlgDirp0fDsm^~FUD0dHal80o?8=hxi*$bu`mwG-ZS@Yk=Y z+jNpuf_Jf{8_Kn?+twM+sV*BMw;T95uZXv3O;$MT2T6Pl3{>L8 z=t|b)bd}>`uGyWBGc}w`Rv6hStPL;#9KgW}`9#KsOLPpn1Dg{x;g2a{mm}cY6Qm~> zjessi^06EQLVt$vwIU9$Y=e!?^PmVIpl2I>%5=x;%D?NFu{bd_;-SrnX7sB-C;0^$ z08i((xJ*(;`l>mGQQ`k-*+HR(2ff1Yz##h|59DCPbgPkNo$bTh*0J{Je~b5ail)qy zgvt*eM*A(T8I%*JP`q=cxlnLZV2fw(EjNVx;5#D{DdtfQ>zDj*_#Tc|82c%pv7!2( z%n$)Zw#(H}i)*OAV&}@d$1lF1y^wOrDCz1ZQ-KH7(xE}`rcO>bW-zOZ7+IKDpQXH2 z@YvJ9?@Y7MaFqApm3%tOj7HN`wl`n_F`q0-OG`~luns%ivB6e zRj7+ap}Gl!_P3;<9R%&Hx2nWl*|UjL*971gt(J+Sf^8>4RN})GPsLm~p8lP6S(&y* 
z@n`U-O;qbJu-Rfh)GkyL^w5iqSu8|HVLUU9`Qq)T@2qRPmxT{GvNy7`2Q{PH)}9j) z#t$ZSI0Ho>o-CJM5LU>G6uUiGIT6~^K(1pi{9aI#29|b;!;Rq%O%kyjiV+dd@Z#RT zQ{eN5!nr=h_S>ex_SuT?!WxFQtGZ4G+@m_SgX~JyN;9FK=npq0BZS!-UE0aiOm}yQ z$KpSB2#B;|M!dgIlrndZ_T$VrH1p3(6^a?<2BgirnadB;l!x#nb5{A2cz@SHdh-Su z&Itrm-KOrq-nsq<8?~ICXw1i!{?q(31vY&Z@iW#t3@YNVu0mJ)(-@R|1$SV8Oa}^; z%k-~MuU(_}8ctuK4SA^7L7ba&1p+rM=oIq*a}Y+q!QU0-d?(@H#T%;(KVs7chGDYI5CW* z5?;?&Vgp2Ow^9F=?E!W4gavN>&;9w;y?m8Q*+Zf-H(3!$=Mys^o7LF0r$)C-vf71_ zQ(BY2N;e8Cu>!-5@`+zqplqdAXDeM>88+MZdfySWI^D568U|a*b^EZ`<6HrG%`OqK z(+p03r+TuPl9L0{La$4-@aoDE#nQ|WS?mY@Ff;j(J-GZHPx@)oN%q(DTpclrOcJ&X z{a5roVq_+QGgLLQIm0V#(=a5X%Vb#d*ZHJC+PU7Sn)YwyU;??$zKFE0qd@$X8NvsL z@Y^rr=rL}ue+NjMkN?*rU7Qw8bdfp!F?|Px3r}UDY!u1LRgMnT~M)o)JJ{XWI@HRoS2o-UvzvE2pBi@I3D_r57!> zglzP{nk@!oNR?HTbO67O&q?SzlHa=D3}wz#Gb9CTOY6l>lFK|_CC1E(T*cPUppTtV zFRn~C{-Qwnk7o3@rc<<-Z}1isEd8j#skMJI7OlMc3I>Gt-!pxe=o;S@Z_OU6jE3|D zlcE_49vOiB1hqM-X4@OvMs*PpWayH*78aG6Ue#tGi9=$sC2`vb`4F2R&o&IG?0FyFemno^t2Tt*p~edAyGygVvB$n~6=(lubE?)!>ht zbN~65!3?Qg8xN$ALz&Qr5D*engv_2WP9Kj}UM}tN+xq2t{h9;j=CGM_%ZampZ59{Pe6^}aWdM- zx*g-Dxvv$z)}2Bbf6!7bkVo-z3iqgxm4cPvjq34=ow9%N3tHbw9bf8vdgMC59oSdk zad(C%)skmNP8|?9Y4kDq4?iKiux~5c9J;3DwC!U0R+g-(3)1*S{nHZOO?4rQ1wzD_ zFl1*h$*6HvG8aOcRW)fPPD||J!aOO$azfGjD^MFq5e!N8zPa5gJCq0lgn~vt!<%Px zxuR1$t7HeCilj`Sq-&Pb<>w2pW=E17UL(vqnf&TRiVh(uK+Lx^TITMlT{imGopvkO zpY`t?U=2_b&Y4(-F`d#-7S6`JJ@$x&nsC%@aa_s_kY6+jCJ`H}?RaE) z5HeZy2a>=N*l-J*iJX%L)Kja4pkwy{@vOn3Q3R!JJ+U!9Bkv;%=MWK_2J%%%ZNDkEipgE54JnGpu~`xa%)Pd^@*yOS2@1>${9wTad$~AI%i9Fd)2hT#|^$ z%-(v;rmck=ca*mhho}Ya_mM|H=Uro?|4;L9`wBo)a#a~^u(zQJ1hNL@dW3#*Q-;Nw z>sRx}%~K8$gjM{C?{3?TTpl1*vhBaIzr@}u;;D)zfUcGIW_az z#5N>&NsY8AVMXmWv_zQ1P7~>QAYjM(szP}0h5O_D>R@0yb!P9K|Ovne%T*dG5v8*PIe=q1<~ zMPlIPycWy8h&h`j=hkIU(SqRo|C1A1lj|fu>Eos!fuEH&_=-ni(sp`^8$O+Ll|F@h zLBi$@cjXB#fYQ!ghr<-*fJa-NUOl#kDodB#E_=Z?K@GwnDT0+w-jN@})uKKML3yoB z9i?u-h1O5=S)Uih21(_Ilv?FT$gQR573lDbbhRjy^|64eNMoL?G9@c&dbV66uzL-MG?xm;BK}osD*(_^ zYi@)D1>oq@^J!-rwDsH}OtTP?w;DUZ?q3hMf97}p$I>MBiW2hoC?h09cq?Jdj6co% 
z@;e-wRx=*#sxx-({w?WM?d|7xXXlmil9E{8ZS!CJn}MJGkt(SUCk5`-n~T+HR}6$sfYZ3h-!Bd)`d;ZoDxM zIRQZuSM@WLxi}l_k_3>qCJ9fb0&VRiM}SFajl9obRg%a{ zzOZ&ht{9!Pim~oNP-bmsNnz=3@|;Ywl9C{TQZO~9mX%8^0#rrn;3iCbkj4CLeo5ep z-l^_(#gsHx6sr3ixV)TJ|GVI~k(5Y{g5+dXoR6bk4+(1r5VuLsb6wDvz4A^{k0x}0 ze6k3l7heor1wy!hwXA8<+X>DfkoiE3ZBufY2gijm zD1?B=VgR_g0M;1^bsuDziN%=|JCF4VsY1GQUc|AiW}B7ih=R}FY>6oV2SO)cP-{}06VH}%6Tdx!JRLFJs;p$$7y=I8WQ*=%Ys54W+A{ByBQ6EXu;@ ziXUTWuWW-r;W$PX&zc{_l7dW|J;#cLbS&zi$l0G}4vh50;(Hb__0!laz!C9smC^2n~Ty5jSnOdws-W>YF9-u?edn#qd0gcTmbomwW zK26am_hOYh8(^ATn_sg%)>rPF>S&a{+2?{%8%m?7hZQ!}W}F$G961a-qXHk-N<^j8 zcS9m!VL@zomH0&=OVa@9^a5!2%P{zQs)mo8uI6KN+{I`iJ6E%d_@KzgeZOQZBY2z^ zdhE9d4CcHLgV`c`(g@*jfFj!s5KbTsDH!#w$W%|D{c@VP)GpGWJ9zu4^%qzis$W1N zIqG)?mS`d#)IQATdX81;?V_c3RoQW%9!nM0frkUht1J=dC}HX#|I7B=xG#KBsR$MBH6iLQo`@9tj>WD z1PdKNV6;wniuAbvmmJ1ibQQmvb~DTtuh%0RlNMTZ`x9{p5Xr`VUeDA(CC6}g8MFbE z$4=yU>o&h;rQBrWo4UQG-U(a?!0)FctyAkC1YlpF9rkbU%lSgFjRNE2eK4cOf%gIQ z0LpS&kyNyxym|`IDbIK`&5U?+E3H8h`Vd+hRhb;MCu}Dlfu7NHe|<8sC4(<48g~ry z^~2iXyCFaPWQSm}2vPi;rIibr@cI~g;TgECzRs+nH2jm_u%XcR48=P+I`E!BdCDv( zebQyoq;x$o2i%bY63mW%r;-{JzvYeV6t{`?+USJs>-7vHEG!r>RNkeV!QZo2tKEdL z3rX8P>9mI1(#*{KaIEwQqS*ZQR66UDXUCl+M+YRf~;I~~$@G7#Haojx|s};q&sJFb-Sfp_zgSy3kkWYfWZ}7|bY$4F9r(uU<@65mUN_DCu_YbSv2MR9Mf3#mUUpCbf9axO<)PkfL`hXHvc23Tt?!C;I(3 zQmw!I7uPq{v%gno1zX_)Bi9v5w;~ZgF$ym=rFJCp`kpdB+xxxeBy9|8z6H5Av?S6W zr>giw@<cV;Ute~oq$7ojbhOyb`QY=Ur&FWS+OB;e}_Ceyu-YW z`UmWPepW+=18?Uv%l$G2q8e6nRhsE?-f#Q&gG)l~QQ=>f_T1FhBuUljA(6_5(-Hei zNM3Tgd3emKX;42{>i&0yF&ZLPD9vw=@UR%du;+)ad`_|92Iz?J+Wk+iQg<)Af}`~O z#Y7tB`8j0VlVq1-N3`k#RvrzLq8jq(C&piM4&w&x0g+)x9srQIfZig!XYt-EU-Z0W zcef9d=6vU!G)r>)d=qz1zi&S`7eu9k7vjowVg15N&$G7ZYGp3a3pNLzU2^zgO9`4I ziU)4^`nx{j%&lK!nZQu^mb#L3& z6SGZmbF;S8_hxRPPr~!>i1o)thi4u+-9h;SGGpv#{s=8<3QV1ez2swJ)wc|&?-?}m zAdY)W0e~IpC@Q*>pNR>@FkpNX)oyYt_aT~w56T%sWmXaQF2@G)G*eWzU=h|e)QFHa z4J$)1m?}lFqB*J@3B)%DzI7rN?dO^oM&76j6WE2K6aLvGS^p~f5Zyq^7tWY6OF_jZ zans(kyKauEJFei9!g0YND-;< 
zIU;`;_vKHH9)x$fHR>|6{{)K3vjH0_0pmw}I7v0}gGFr=eb`{nyZj2s!VydD-hyWO zm74XZQrRU+0A&wBhXl+=MGkne9wmIEyh+4Y({V0Z)%P%%!m{k9nq%oH7HmN7)40r? z|3lD1`3#FX(&Jn7e9)J+>u6F5cKE_RzS|8!v(ZHk(|~<=@L#rJ#S}EZ6pmyGig=NyW6pdOkS>a{Z3v&}cbY`R z`!85q30SHfXdw6jiS76qXJA*D*Ja7%WS2!R1Gs)G^!7Szb?>%ptm$xVSD}O!-n*syO ziwDUsxzXeag&w>#-uEt9KCv28${3X%`fz+4h+|E<$fvcq|+nz>f7o>WLN!Oiq;j0AN$A%X#g`TUmwk8&KFb1+%|rBT)m>@Dqv8+wZl2IBuK; zo!R(~ebKbuJo`*f(7QPtdTc6@5BT%@C4i||(K%o%nbN6;is2t+B=EFld0fwmKPS{C zUaYt9DyI|%>oy`OCAD4JGo^C6=l}s&33-3;os%A}Bx$_|AthHa_6!~tnao29@UTj` z$IJv|@eu-;KGs55deoThTt9oH?Yf^a3>qxI8-tD_7+q7itb)*929sCmH~TnMR5WEy z%{Fzgxu>Eydw?LV3GgPX4ANRr?zSd)$!^hX|GsJwmTZ)P+Xj^0%Wr4An4A_yO0|WQ z7B-v+gdnr_FWnQNEDFu!fmU1R;{cv;>-!J~wv0Sk0ESty5+uWzu{8oR$@9EzP;(~i z%Y}o)5;5P7L*{^uE%bAYRrH1;%*v~sEf?yVY~WVWz+ovimuz^+2EEY`;hvIZb{+M6 zo^^5+d%Jn)j~c5c6D%?6jtgXQbBElzkPiT}sI%5lztnMEwAJ*W9A zZHn^>3|~sAegThIWsBMDQfbex^O2EFdH$&yjPQRYOdrqQj1wmJQ1}sKX|kkJo1HMO zJ{IFqQMEU(eu%zP-2f)CKLn{!+95A5%1Yi)o|JZQQr;T8!a!+B)Sz^t$0N zNs(Gxri|6;3HcDLy$KBbQn=`C|2zyc7%Qvc5`T$zQfv=wb+BJxWTdAver~vk1|)tT z#!k!LZy;4*xGD9Ltc>BZp~A>SAi}|&ko%6)Z-6E73A$L)0pyU%l0;>?p)DtjQr)3u zLB(+BnHGO}$$3xc*Yl!k!?){rm@nRhlZZ6Yi9G_P!W6ueeiJv39l~7u*E^8{!g9SB zF>#eaA#+&))H)s66mL}|d3t!sdLy$;Az@J#N#=h34PDFuMU-}Ujg~zV)7#qPC!?9L z&+x;v6r-xKc?=$a#*%zQO%pr=8*{ERTjXKH%$1Yydv~(Ac!b)=DH#GNs)IGrR z$*d!h;@BgjMea|n!jf~jX}GBodguvLA6A9Y0?Yz7H=ZcEK1ooHFGv^m6W?64DeowQ zWiE!l_dM5CpBM|`M>!*20UC+y{VhO~F$sZZJaKu~Ry4H-MdGnSf@uoLw$+k?f!_pXvR zk6{ONmq142C6=S2K#U;zxW`LAcGP=?CNBx>K<0Rs-U$dcZdHhA{YvJDlZcIBe$P2l zB!rtSA#`h;%fQoxWel)_z-}f$K1%V{zCp+Gu81=H{MTS*uf(XjxNn9ZA@Y#=rEdU@ zl@8jVK(e~K5ceJpB1TI!lOkC)w=FL~?YD$cYl!kth@jV|9z(Q2Wt8I;_~AH~J87wT zk~77&ZEET$@HKv*Sm!~s*NS!F4k-ke1`89~m8ssDETpw~M#_uJKho&f#1yj#*npj7 z{ST?EWJci!IZ88HokQ;tyL)MEPl-0o;@ZrZylcxWj6eNX$3L;k4)b+CvL z`*Ce9|cXAA&NzYX46ir zMEDM^#P-kc8V2XIIwvASFOxEfjYHThYi%@X_=3Nbo8}xl*Y%(1-F-f!u6>Yf0NMIb zZCHI)oazvJ=%L@LZpAI@&7oG#GSp}6m)=4h0u|8wH3(173IXXXLWDClQRJ#;Do^1i zT2S65BCf=97&i3Al0%DthUF94LFkIV3~Zq^noVV-%LJ4Yne 
zI4}OB7y9*RxLY;P3#LSQC;**2Bnez8&WF*SpM)L(v3}NAKaYnqar@z}$NON0k6(j( zhx1}0Z^;{tj=kH2y%TsAPFl>mG?)AK7o{9qQ{?{|lXxv|Qob+TMPk&e_D1eV+ zKy2luXa#X$wSBqD_%QWUi45AW)&hd=s`Ei27rbi6t<{AZX$q6*%DnZPV-oRD|XeoZ#*~G9LQU=W)Lmd4qv6f2bO%Z_>51# zLPQb%f1F7;Ol1d^Ju6=tHe>B&JKoAsrFn-PiRHG)b(M0Cw88Ks8M2?v>eN7!_PoFX zLq^HffG63v^P(75m;?hpSe1f1*S&S_07=kECxA|1@d@R1+T-q2GMl#lsXAXVqMiz( zKwhi=32XfbJaX8!<4%sZcYm=5rPeN5;)w9XH?ThT&2TUQ&4yC6yHyjP@K>muNxDd z3Q9^TsOI^~mzA7qzYl2C?F@@-d=sD+H6O+TdO@#le!N>l18%pUfN+9$aI?TY1G zClP+EgVPy*OCKKFuGh6TAXwSh#q zf4?A@FlR&r5>{XuF`586qjSs-5^dy6v>%!4x0O@d$^U0$WZ0|N!N-2WMlgSEz7|)H ziVC>qSjIQJMWcOj66^e*LpRGLXmhvk`K?+qgI8u_#-|v7CWDmsP@{BIsdAmd$lzU< zIM3h2Xq65;xP&|nQC!J8{E3M^%jBvV%xVr|vNTMeFFQ&5}*;r`NGz^$8jQB4~G1m2!NOxG+ z2OPlGDa>0K6lF)d0gapX5uUcRC^~!mTYiw)*T$4%+{IO)9w4f`s?^NH6VrLQM{;TK*&>+-X&7;uWC38i2`t8Y|F>#= z1@%EIoS#_uvcYAHa+oEmrs)>03fW(_Ps9b8t2uUHRyZ1+Z+R>i@*8+_&?(jdBx6;X;}5AK#7KXU*GpBAWQ%g(YhG9q`T64VVUqU1mYIlSjV<$)k=GpOQlJ7BRdsn z=gA}$Ja`VvrH5BP!B6brFkIn#HoZ@#_B;33=tA|^-8 z@iN)d7tLXI9WiT+*n&P=;je+eZH8O8ARYRjFd7i1Ka1l53zi{3v=rkMkW%1APe8>F z#rAy?fUOs!XtiQR9w?zQlj6msKoOUQf4rUm2@(_#F9$b_+=S)jS1a$G->+!xv2c6cnzcI#f0Zu+8iqPR73&ustVD zp6u165Ov%daMizgmh&Vi7v=3|%Bm+oB_huH${00<{|`w16|C%-h61`iH4N)X5`3gh zc9qfQ<0;05P-+0lhY)&OeSlg?{qV`IIo|mG{ z%Z4vT!^iBYF_>RbY?|W8nxDdjPaqFu05!W)TVkl3*5zMLj|3}8WO6yx#?g_xzQ0je z&@}H=#VGKGSHrM#p0rk9lf4MjdO-eO*Qkh}84d`Y2uhZk0SF7xg#v=RAh$yNUV?!# z4ykK?X3MusC3h7|Y13o}#H5zbrF@QEil1Kt-eHJTp>ZtYkPe!Lff)vp;BzHRA~hW@ zhNBJdp#2{wWb{SA*GTB#Ix^|Q*>he7AiPRX3jbP&2jjZ2EyR*|q~Pd{RxWJG(xVEY z#|g7OSmUbOO4%a|fQuH7?ei%Cl5s|#TeER`!nWH(yOvEMgddxjM1mnp>}o=2sbYpF zF~f*Ei5lJ~E1NX$pqZ0;JBe@WKkFPG7Myp%Y_SY$&*yGW6+yivrqpTjW~i zD66H<3YCT596Y8sFO1oM-fx11Jg3`XKp5$-o;~B_y6A}~YL~mtE^{Js#c=FXyws#@ z;Y^4J(OUmp%C2`)MzqdBeCn>Z=d4TJCCmfYTEZ%sXH%}3BTRC0PsDSEh+ndHSkN(wpPc_l>^sCm8BmKX$&^2;R(8bQXsb=mKH_V1 z!V^k|)q?XTGb*=@yk@?Wn=JFP>##rvy-_Di^f&<&ZrB^nQVZ1=?ENgp)Z{x*yk<&(9QSs3iiLmJ?+6373hr;4l6PO`U0JikW};JjD?bXeuu#ug_^!Yo`G$+z3XdUP8q_(HEW^ 
ztMdIsITIo;oTDslFVQyK4@mo07F`qbGM;E;@|3x&EgMa04RPvdQs5;d;?FNT-DsmJ z#-XR5eeGROJx-P~ei>DLxDoyneC=QtY5^|zMOEJ-HYKY76bN8vg`zT=MqHpR5Bdeh zzK=zy(Ws4rFZ84yRGK^Mni4pZw;&Smox zU44MH5gJzhb33Y4K2v`buijGmj%Oj${tXlWEFwc_yW8&VXNDBQ1Y=F?!e$J-1n<_%58L^9{ z6uN+&Eu?^&rc773fqa7W>jz7G#D3798j!x99^S)5B&SkXc%XTwqr3QRR(g8(prnzn zg?Co~nyM#?&6Yq*V)6KkW!dcuzF`O4F{x;0WiMiOY?I@1oh#@RcUQ0=2nn?i= zuFlZY%pElxNU5fI;s{XDeLZSQD?PuPzEpYnss&EE%pp*a7^4Fz3j@G49K_6KTSSHi zhieXz{;K@&!WP0CaDglrO_i!Wa)}okG|%Nbr(Tmy0jTrg|7o?7Rq?WK08x|7?hM~&P?T6AKB5y|>Z3~-)A-X@B=)h18VCDGa5U?D%WHYM1?F$X^LT;gM4sMS9wwSd217vKZH zi7`zI3=kN)pl`LTjf1p4H_-ys##Nr<=7D>833JAxH^WV{X8w=1!4S~cgTF?-+WPZO@J&ZeHlMavl9IzU29&OG&P z;wN_XV&;r1^S{FrZw$8!aRb)f-hR83)epz&Qs@14Q~4-cVVl5`4jc2sY?qj zbeD!1_2)t$_O?^35$DWLFUeAN7LsIPw9$xPUq@SvuAC)y`zhV263_M9!jeVBPplLV zh(}C~>gb4lLHdI)?iuMPlw7@s2ySJ&DvIwX%of2iHl?=v;{K_t5fx{LfixD!xY0{O zg*wTU&N_VcyqVlD(3w4%V~oGwFU*}2WXF*BBon(=Wo@>X5R6^8YA5&TLwC#WmGpYzjm`!8hvT%;gMX2w)^le>lJf) z-11x&IwqJYwk6e|9k{%pBx^D8#)qw~{XIYlX0s}ozt6wLT(9kV9( z{ftsuLQtfQKQBBZzvdv;6m6|6MvhI!c_1)1_qbl4wHNy>lZRu){OGW-F7;|&efXx} z57`VYl|5LBx=C!qQa`rtngtaSYiF&CEx9 zWOtZpTptmlur7-(4GzB56f^jkWbhhSX=*eCww5P9%FH9jW=g^bwE@7Ac65vXpxy~A)pOk?3; z;#+qrct7i0D!SETJ9qll2Syqi90%X~tMgmUI+6Jodr0Uh7sVSHUrVDbfp4^aXgPmj zYH1BZoWYc=P+b0$>2Si#B1#-u4eC|0NWtZ@sr>*|2~@uRf!tT7U)*!n&&0+Z>!^r2 zbHfZA>pH3J&|2Kp$o>csXI*^>9~Z{2^7?Y0Ycie zBAWVdwfW;)1DT@{mnuW`sq<=HHB#r5{!;(UKhJ1I^h$&l*Q#X@zxLY z!rjq7^?fw!+`NGDE;asYC{(kYk?9-!zSuo7J=k9yc63uoPMd1%VLV{nz){uP0;v$= zun>^>Sdf~?y_i^fyLOuY(Gq3i^XM^Vz8T>c>UG7s$7-L%GaO-J~MJMkpzjK7< zdTHU(M5Ykp>BA(fz5w&g$XXYi5!c91=J83D5>a&`HcQ@kUEwjXAup2<#6{b@>3*CE z1+BTQ3QEW*JqS8HDUlCJdC(o{Q0*Zy!K%VxXb^l`ng|BLl%?DAG7}pvF6-OA!nlbS zp{oAa;vPPw%WL(+VUlT8cnO9@?#ZfaUG8D2s}S09Il?XKZd+U@7srsGZu0PSM;YGa zVMoI6{c1A9e#Iw?{}`i6145TTw{nvh(GVwi&PN7IqI;6AUmH*b2359&m8Y<*J)rx) zn2oF^#;d}XX@-zmyAN<|yXyM$>VA)iqI~IAeM)`hs3`*ahIFFd*Xd-vuF&Dx@*prf z3golUPwO~gFK;WhIZT5Zn#l{n+7al^k9^j}TSnb_%uH>2)c|%v*fpC$>>$k`1T#Z= 
z7XJ5ewK`d23_cFicWu72ygQ-Hnljg0Zk$Gljq4JNvC4GatF0~FTdp4jn6SOdG{TGU zd&W6*z`((0u0}S;6MkPF)QjjZk+~B?A&pc9wG?-e^-R~xbg0T}S;b@4s4LT{Jf$p- z^vDouuo=N#Hra{R=XzA(WSR~$30o1{7myK$aJs;QHt{C_S2NwiyTF%=Y^|YsWd83mn*rrDdm&a{{+8xz49zsv3wd*m#Lt(t43)-*!WlQq*ByewWwSiyyx4tu$?RV=E;|=pLAm)T8992U}%wadEbwGIPG!A352dZ2OUa zo#LVI3B8%Gr|5#_N%osLMq`|9B~Jjz%Z1;LIGid8_2b6M$CjsN%rU&!GxfZTTlP^@ zQQs>tzY1i)l-p6f&yyRVNmec8pSF8Cj2zf?osCx z5j4TYdcJt7QC>9aGitdaiWJYQ+hL;L!Vg|ID8hWi;!U`HJ(~WvHuag}G>ThFtO$x- z*~uo^#C9S+hDx_By)$=)$!E$&m~iwxL-c>^evWrqY;{(Y5@W4!nysr%mqEw(;j7Ie#JwwRfI3gg}=dbQ>XD@?CYCBnheNmLb@$1 zf)&kENrBkdY@J(J9@fs0EX;_fZ!X=0n8eYG*i!#y+j1t9L9xY$3mYgT^UG1>UdCp| zvPQz)U=09#=RVE3tTe9xsrr*V2JJ(0n0QU6oI^N*(ef?JV z9kcGZ`^^A3POo0Y_^gcx^)2Cdo`|a4@mU7>W`BHA8er^0hW za^&;RI90n|C&V?kZ^`p0PrO0qH%A*tQ8GmIqkLyxNI3Ak2P32tl>2a>ev7+ir!H+& zk#7{qw%~85pFlEMQZq!uo?o7HapHHmGL6FtlfDNcb3__t(OmD{h=5X~cN(QF58E-A zd1Nlzl~$eG$}4NOwy`NqE-ihYz#D!|X%HBEh-9Z&tvhg*RrSK(LNRH9%RqgdYu5o6 z85)ru$DJF=QjGpF%jP3RRSh4}Agh(`y;WD0TxlgUBhR&b$Y`xrI7Q z+4r(o*VXs`9SWBM1*(3DU6>#BpL@R(PM_Dx#$O!k01*lz+Iq*!S2N2ENv0yyBC!AT=@tXjwMhC<}(XCmP-@MO_3MEm$_g% z1$C>j)nU$MqE6+Z8lU-6Ox1McE$PgqaLcoYxYi^+chbc}ZfJ5o@fKZ8V@EJF>ZOvH z?Hnm82N5qWD|@Rw-B#+p3Qw;wcx1IPsQ4aXMw5h-!3Rc3wq92_m31^7fHHyU(X{5W ztpzllc_dGLqv!oB{{m&@V!Ej8gi&u-KVZU1W`WXl!n?za!@1VI)hNMt{mXm0+EF2Z z?~-%jktj~zBP6|V3Rd?VG=Q!F1uCP*n`u?pU_K8_^tLCk6gu&jt3laQ1DjCv8ajnAEyKsz7Sd0NS1YV|psFrSqsU2>i z6;LdY2#b&y0?7kvqIOrsh|5x%b})`A*+L!OxS;uHilA zu5!I?CePsw%YjeG=Kpk+;{}onJAE`)d$YDjl*^{X%5@DWlc%YvPLhfiI!rG&k){809n`O?)3i+jXt=B4ybw<2*Z-G|A zAtUUeqnY95$qwQruSb=Kx;u(+;l%oqq}X`*@*6~>C7xI+gXKvH2EhrbG$?t#v~L@R zR&wu;r~&P+dLF3>H?g*a;#6Vw*U*nKN?{QdL%6Py?S|)fib3BY1xK!b1jfAfH5*NX z8*K{^*T}2sSRP(TPQ0DMMp*qhZf509DL4rsfJEf<#P4>?Hj(*j7I+!Ibed->Z7YlO z`gVkOMvGpISV#|z?9#@!AAW;`1;5g?wPVi7EC+6@$_TgXP9)MiP+;tK?YzXxO}rZBnDOf1;4 z;}@B@ihD{WqLTZbwyKX`9QSkWEA&HC`+dLidBDedCp0&_Kcqe-F8rx1%=&5df*c5Y zwJc}-Waf+&PhEKv93C8MKct6{@t)}1?vzlj4fgF2wSFXNTT-cysG>8z9zA=8B=eJxU!=$}d8g67)tP!%7$b_*QLnSet};4D_<9iSAGI`b 
z;no=Yt5+mr=}+;BnlOC!0}>Lz8uJ6jpxi7fvOWxS6*ps2>-q8JU< ziu34KVR?j-7{EMt3jfM-({;obH*Sk_YTH{W z|E1|;1^e2NFby8|n?Sm8@Wio~OFwo}`k{P4bLRo|(9bhf`LM-UHHt{OKiq?ryl7-= zqW86!p`3&TJK|N0l|&Q-f^yNUmE7kO+uS(LZDpJ~P`_rHb&D8(TV!aFFdQ?>!0Lv;4S`TgIw+xUq+aCO{%dSHSK;Xu^8xq9 zue*VcNO9y>aU+ywP85#&?zY;vgx`E zEly@525jXyaisbi&s)s*csuXFl9FlaO4QPR(`w@UIg!?UoRFcUkAouG+QmV_ZF-SZ zN@kxf9GYSLrDDsEzKCKWHnbDarMKKp-ak=&e0hIjM}qVmo}h|Q?w%}l9?>HmeM!P1 zOC3*H4RD#{R*()17X?&$&>GSX!7-k`uXkS%g)(U#ecZY9yWy3`T4&(6JuZT^9b7BI z=&u$s@^ht8OtBE2m~3`t=d$1Fa=XzU0o|V8sMwJ;)ieKdt1GTFJ}dyiOKGQ81gcA@ zfb;TB7d-(9IvlgQG-A@piUFQcFs$$Dt&D|c0oLL+dGvH{wa%;AYXm;-$|5*A`eCOXbPOWL`)vcf^ikKjqt*E#|4}*gw zRtqyCF+)3ZKCP4E7#F#(NQ?yj{4DrJe2{Gmzt-cdW-{VdQaCB|k>R1i`@6=Jn}1J& z_Dn|w3D3x)Rbje762MZAyUNsAMzilWI>K6!w7D)K3)*>dHPNL$~iv@JKVFPw2 zWN2#ub~mO0Dv+{xpsp7+$X@7OR6*08m<>SD=rB%|eOcqDx!d+%v+`U0oP7G8waC@L z8IQ0Pt{DJbf+62m(c5z8eTpmvhsf5=)(3inLdsgU9^s5m1l~-Vl8_{>iLx_DQ=@W-PT`qOF*$*tTM)S-6YOH{pP_qEKTZhdrXM;itAa2a30bfwDo04Y|FRE2qGn@*g zY~sc+*#R#g(YiWn_yfDNRc0h!>$`to{BNlkWj>QS`j~!hUxNcjZ^&Rp!-jsm$GoFQ z^9{yzyml)WP(k5{2AD_EpZw`fROiA;#wP6Eqtk^?f&dcuM-kit z%ZJER|6KgmW5W|)JGF;9#~@(3w+;+*77-FIc~i189PT?!0?K~lmEXvdcrNj zHWyUW%C9wO9iHs8dA`SGr_MKm58Mj5(KF1wzs%!+-NV~xO zhFyVHzQ^DJ_Xb<#xZ29+bkCl4jJ;atl^j-s+0MOPrq!pqIg+#Jx{xp|wCt9=sJ;1N-^+#x{%ru>j(t9A6SW;xsG7A!BNivTOz_7@I+D8rbX#EL;h-~XW+C12SveYevGy14v&T@8|T%fhg1+v=# z=?XjI0Yz~V8A4R0^Wv;K{gul7B%gmPSjV(f;Tgw1MXhQ8?TyKco3};I=KQ|LUC@*$ zNYPV(S;58CidhLIy??S!1L`xGrVwSPXl?yX;KW~s#kctkCk(lTP!==oLHl;o=Um`W zlIfvdRiE9*2KyJSTU&yu`2g*}oSe9LUHEO^Q#Qs=%mfw!88=VSj|4h}g|v;(c+Ry< z@FkGN$~_}?cv9-lc#Zd|`xSv%8JkFCSU%ic)4TQ@bFZTiNOclb;t~*P^p+~M8LrJ| zEq4p?1g;-$5U99*tnMol|7gRJk_^Q3Pob$p$*$11BIRQT%<+Er{A=DR?kLn6FO)#6 z7rAUXJNn=Gu^9O!B9?G!9f5^7Ty0cl>vaBCnH5yFApM`yz)Fc13>#Y0IUIKmxDrLG za|mYY2|8-X#qcO3cu`JKB+MF})L3-v{K~!8Gw;1oF~|R%#0SAh9qauQlhiaaG-EiZ z@^<5#yt@3Nc|+QALMPuSQG6h)4YZ#;0l8QkkFS1kkF#dX$=hz_q!Zf-4Hm%chFCu~ zLb@6lzIv(4E60J~+TF7uepwOz+Ux{)$Xa(Oqv&+o5Ly5H&i2v6CqRLNfitoEq8ZzMTXqbn8h$V^i+jr<4EPIDPNI$-@j=2wGvy4stpN=fEV6L 
z9O2ZEnwrgUs2~S1gx%xJT!}6d&RqlQg3{hh>0Q~z#er`oYX$HjYu71&}!Vs z+6o-_MGTFpUn{qqR)5VC0&MOz9k&>OQb@|q!kx{{t&O9CidG*X!?3xxl3tyi2oqjv z|Ak0D0bnJ<@DZ4vRZ&keXUwt-2<(++ng*nuS_!kX>!MaUKSUbLi9LNts*&DK)hv>1 z^CIf3y4aof9wl*iJ?Qr2tb|zN(?BIyEo>6M+HaN-?TEOFC;vJ}g*vPvmyUT`@-IX# zgn?i|zycj8yj8d{Ov&qlXSZP%1ooZiP3hpNE%;?cwpcALFOEFuW1ylu#vGjh6sW~T z;2OHVyIAXvZ_A?1_ja|fj@9tecc9Kx@BP=wnmFcLZk2a^m*hCVf4hqw%yU$+rRE>k zP(3b+vej!!oKO+KHzR66o`(v{!usJy<&}s>ANL7bS(WvsBZ(q>9u{VL#ZAZTXa0!I zo~Wt}KCOOc2!{W_{}@ImY+Q*4eVQ&8F;{{Rmj(t*i;o0~DhZr*;?pLXx)898 z_NVu0DqgZ~Zvu}vDdbR67YX+)2Hl#BUxSak#7J_^0twD1dHB zlLWqi9;=cC6O+NAjvyYBE>sBZ-5^*Z$6Hv)^XLU0o#_eb>fAeE3d-Kg8;1+!?IZ%A z|7~_Bz1*MNs1hWz;ewbs%i;X4wcP}WRb|950Lge?f zS;@(%JlO-#v&Zv~Bk!-20h#w@9&OEGyHsl|2jgZBE?+I{Ohth6yNzp)FIw({0pjP` zHh@100ebYXU}gxYcMn)@v}Z83am!E9m~~l~0Ik=uMNB`(uoC=my#SrQ(2R?_@-}O% zpMPh&8_DR=PRDfVGI)ZZ)k2t(Ws_?Q`qv&Ozi<>$=go$@NVF8onkh3KLmEOWoG+Lj zG9J2B8)1N!-}&(p5qvfdyevdC9fFWy_37k=y=O2-W1KSGcvp>`zYt^}FqUxgxmHPX zIEuw#AJ7ILfke2pjfExBby|IIsXN(lhbf+6=Dhb`%+zw@nb?~ybipVWdpIbo7i?8e z@e1c<2~*0T&dOp;R@$9ug6mqq#07N@#w)`MFfmF8w&5uof?9!rw-H0@quzOiw~VB< zht&=j3OJhB(rExL2>bN#U(fEv6EJR|<_-=f7RWa)yGk3h!?_AbX$3>^cCv8>R<<^p zqPZOdCiR9-1y3Kb=!(7bFTSHJ@{us=_@X=)ET?AJ9Y_}S-VNjxh>fUl9P7uRnKD>e z`Z4^PRO`J5uZ6E>H>$h~gx2Vvd!}0>{S+8!*!^n2K#g3hxkYPemM{xjGvc)4c`%hd zfF!-D$Ke)CUE)-bV)FOhv~2n#k?HboOyr-OP}awP5IttZCXp6+tc{!JoR9+R72H!p zdJrchSRB)7iy*2F>WyjPFOVYHc8!9DFs@GpgC0ljH}g_AwUE9MeT0-jlis>D5}7dl zNzp!8pV(7;DTPOs&TUL_t)_}U*-p;o)isMRQX>wSm5+~23pA*G8$i5V+lo5R)ial{ zHriEXrv-?85efO7*L$s*d`aI^KR`jYm%qt3(ba#iXbGkg*D=xk^dnq(QfM-*ob+t_ zSKXT&5Yz-fTjz(`Uw1nB*Is2I&Z0s5CO2AXx}JEaiyY;f7{)KNjqOWSPB}_)!bK6V4qyhzQwiV{@r%2Vh>-JAv@YLS?niQY-V~My*q@&W-`Pw$|{%f&aS9473r88 z!k^VKF_;EQo?0D2eDgBtSr*2sz9+UCC$mHH>f($1c(s#873i^bOH*zU^*!WVs8Hf0 zp|f&EjRXYd7qcK&^5;^zzPtL89Ld3YrQry!OBwg|j>Ij5OLQ049QS6(9+VN%lrauO zJdoVfyI2Oz|Ibz0hwWE})aW@z42H}{GUmHec!+9A8Ze*#hp7QekvGb?N8nOn9jKT+ ze-r{yQW9}|^1`-jcvrZxxy$lV_BTyhEa;7Tu{lTH2gl?kb=s6;i2Mxhf^<^h>@e_4Vq 
z6c{I9zTSfE47#II&ez!gCiFy}+(qn$Jttga{UNcpIHp|~-hqGD zzHzdM+62?hRQy@ihhh|O%y>ll#K(G3HNaUIqd9AO z0HM=+VJDt$TIeIvIe5AwvdN!~OZa;AVT0kqq@sAgzy|S!B50Q5ys8?prTr!4PZh3) zMcJ-OIj9w%yasbU`b{7$V#dQ>MD9?D+0GLhs5PvfebW%2I?kg(Kj{$mm9ESAo+E{VgFdM z^1KRL$}Ac&P>pa35%sSkhn3j}X5vBn<6)+EgWV`pTe_zH!D5ZxA`;8uj^#hN^Ece$ z0R2k|#76_23sTk7EUQ{5V>8C!v~{VanAA%vD$Uk`!C;GqC{mF%EOh*%KR1s2fl4ZE z7f8t!U+e(CIt|fKB|rY5@Oxkgw3XtkG%cC3dx;vT6Pbn@H2fK$?|N(l{J>`%R)GQq zep8nxXKJ)jBkfp*oT>Z?yKSs6Q{yqrJ(VjrYcXc4rZ#bFerY@0x!aUv&hV^tk@Z!=%scT`=4l(j#zSreu3EW6} z_vC${ANE9ZotFxpA9?G}hsWGcq%mpik2=yc@fw49(@6oiv|w}`K)WJAJ)_BHdqyD{ z<=H?g-qkk6F4Z(Mi<|G%;agLT7^u~71ULf>p&&vTY^qu}!kL1X8PMtP_X2sHhI9#l3`Wg^o)|uB7+|}@h%;q|+ zFrjeVtYj1NL2WDwC!re)EmDoQ**uq>Pts;gdw{;`HKEjDtv+OuX#dmCQ3F%#>pKke zqriDimZ?~A0fpN~O^3f4yicSR=qhXD}?=X_9J9+Sd~qSvVuK$r9a`@ z`+(wr%GX!!Qy*UnB4}leUxnGn$=qLp4=mYO4M#~MuhDsj&BT?!@0~o$?n*P*%i_)~lI;xmY2jkzln?CXkz`3-Thm0oMd}kAO z5-gyh@{j0PzKB(dF+|ZKs4m2c2<%4;EX^;bn3vO+N_>=1*k%H8Y29#EHLNwuh@MQC ze`4Qim5w;v{L+*?j_~&LR%O>k zh(H@4fsZ_BS!d1(%*^csqZdJoK<{uOib-xfsL>*JN2Cj!UbtfaDVW;vo=&i;$LQCs z<*vA5@4#yC69)qj%eTC)-^Z2h^d9x{wU=P&8=7R8!BBJyEJyIPiyvH%3LfVJj{@B* zZYO%0=0f_tT0>jbHdn3y)Xet9-<$P$gbZVI;(gJFIeMFp!gyh{dn&y?74wPW$^{f# zF2o#593Gx%Al{a+NhT7SO0)3%qgHY{9$`?bnzW8Zr*o#HLQ~C0LmkaeG}F81!8X^C z5};nB7g6T`l!mGXF{}_kRrA;>MP-^d-dY}u7Om~k%alL*$Y(BBE!bY9fmk$)qNyKw ztI3wxW%&8Ilb>5r)ddj~h^A_N6VkQ-t{b6hbvKz6=X){c=7va2pQDcNjC(31&S#k} zi}(2S6p+IpdmAf~I|SE|b?5WBgF5S1HR%kq+56EX4FtSpd+sSXHGH|dLBzZY;>Sbi zlQRLS0d?5jf};RMK)S#8`kg_!)ia9O(aJl^$#+dL=e-FvmO)yIwZ5Lm9?`H8V8pyU zrdQy~EWSHUzCFUo;Ezn3-!3BETH@krH{zXdMgOmlR6ox=Xr)2d6U_=KEzNUVY5$qT zI0d>>v+%aYC^Q+hPTjtD+g!W9BU2k|CtM*<7y4NmdUS1vC~)*+XjT!)3C36GHRM!r zdbf-qN~}%QiY!%WdwMmobn4kFeZbJ=Bm`xzh+pFOVe1$2CWg zV*JSRd*ypEyIb-lFAw2YCi#p)Y}Lz#`vh3p1f-{I~V+Go8el zCt~jHeOm1WF)zwhv3#jP^S+5Y5QgqrVAr?L zAuQ2IZv)wC%Q2AXPLphkyC6}K>jDdI5xC>y_g72Zy%@$ z)aQ=2cq2#ikoRJFGDUl>Qh86)<`Oq0T=aM5 zh*jvH6!gNnR&9P=L=!q!^c9`zv^@pH>`ViJdCexoZ-ys=*hd>j+SSHWbamtuEAF~> 
zM5=-gm~eG9-kAM%vywUJ0}CulHEM+QwxdLl4;7OijkbfmxoRMR4sm@Ikgg_8_1>xo z9;~yto{6V{R-Q0KUaCOYc8Lw^O>e^X3=Wj?=(qv>g|AP7p~3N>2cHP9a0upT7AjD? z7tlAk^HG8=&<;c4%`IGFOx*qPoDoE?N25n!6R6gr{kz3~D=RGpgqGOUsCchB=ozZ1 zIhA}DRMsl2JwxX-IaywJ3E?qw5qD8cs|>OYQ~6?l*0JOLFk{J8SmoC?bV6;M^2iyH z*6shb5*w73v>V?xkwf$`%>rqkM+hFeNB^gk>M{uS?}L%5_Cg9oia?gAgmic_lu>2<%pvw5by z-|J%#(?n9_pkJ$EJFWQAt2zSkxiI-pnik4nT3F?+ZL-+$N$o`Yk*DjWmgxg?W3(!g zuUDyuL(!hQ!uwpF?-ufU)4As3E!B?%Sfu{rDOLKbd8!usSysQ;Iml_fM1+g12a-c< zvp?Kh;RTzyG2P;Npu*4Yw7Xl{(B${nKEQ~oYc_$}&XvrEYs^V2*qzs4xE0_zw!C2i zM%eDbLoLl`-n~t4o{M1!nyi0~eqDs4Ua_3vqSig)b$`(eoRi`}UPJ5S{sSrk=NRBi zt$f?`16;%S9?{m8E$uIqha*=EzLONQNjWw+#=+8=SLQRVvi&f|uwU>UdcVxhl*FkN zl9zNrF&c1x1Ak8>8o#2T|9Nkd!fuuAp($9=-^KKoRaLg3I>v!KM~y#ZphU>~vaWN{ z=Vb|7j3U)kC#~;7_pD6Dy$CtD^Y!I1eN=2E2wYqoPHRDL_aHDoO2&uz>ccD z5-YHw{&9mJLS*Rsu+ze=mI#magyt3vHT-ZH^FCE#Yh+~)2`iIxdFdiFUW3%=IyiYB zfD^LW*@av2NC(~lT!-IzZurz3eU)^k=9y|XN3kFwK{|cNQu42OMSR-{y-2TK=elNM zo4DezNkvn)afm_{rdL5r3F%0E0GyJi`2DCVGRfR6+qLw;SHv3V`^U*?SMRf5;FOov z9|#UdurtCR=bw5??e)rg;cGCM4T9RR7uZe9Z`NGmDQuaJhh;uY;kH+Kd_awd33`~E zLmt=fzAZQNR1eqdO^fi%n&ll_ylkKr>ap9>+W)FofnK zG;=9g78-CNhAsE(7kGd+0c~=PLt5IySmhBOoaZo?K6QlLX> z7_mohaEG{XsaEz>hAg(qmcjRH2k;nB;>~#72(F1}oO70O$O@S4p+8UuuGVwPNl|B} zAGt*c?~SxBg*Pg}Cw*b5@4=4KE@wA+3yeqyS!p6;6gu+b9{aK=T5R-qyT!#J*4_{2 zHabbwgl;V$2&jrOA{3AbP5pT6lxTkntCxX>e&zkqbp%+N@Dl=z%gE=cGTj$FU6!kt3#` zkb1*-wndsxI$@{QO#@fm@$V~=hkm8BwRBlrsWkG@&Gg7|k7KZq;XHa` z5zhbt_tH8#Phg_oQYRHw^QDh#?j(Fry)+yblbq$mPkOB6>u!4jcXCEB)Aa+E?N!pJ zKdX_w61S;5@k$)I>68p`xI&16CjCd{H|(a8olyD z_4?%Qc{iecbfYytI;;1}x80U&#AmSo{4om`_CGX)v-AIE!Uap?qlm=L*mWG0)SIAa zCptc?AJw%DdCf}8eh~BXFPuV`1suwtofPWQmp&CMBFZsm2olqFlcc;)oy1UeB_2Pb zn@w(El?7PR2jz`0-oAF;$x9VL$8m!V?6?v2CXhk?Qq);z_-^0Y&gZ# z4YMn2;BG28%CWnsV$q9DEL30uaElZ)ikMqkD&}b*AO+x00!F$3;YS;#{m)3X>K$0+U3X|5Lst&~LmA%; zqMC3f10S7xQ1D}==U?+zuA&rQebgz*^SV_%R%CmNuZl7F?hHbHqK~}d2P~gnF)1u0 zuxz7bKc(GslpzC(oD`%0MQUbnCZEn8IW=-=#wH9wi1q>`^K1>_xMxpnmzPcHEMcnr z3X{Dy4%M&vF(4Ic2}k9Q%-1s)0k*;eG_VbN8RX(}JPZ%NEz 
z?^#D^-g|Wv@!&C=TnvjdAdd}UGvJBfzoO&5!-m1umsiu(G|MEK6Mzfk;7V4RkQFL+ zWFX6vc)%)TQCPWuMo)b&BcZ(cQWd~*1f<4{)L&s6ee%4I>q~@f8=GsP*io+M;5rC5_wdt=~@WD;ZIsz~hS4>ok1ij*!LVOX*%l6PA?% zV1R*UO_cI`KgS`4rZ+LX)7pdl&bL1#Lk5!G8-X4q0I=MkMnCAv8ZenJssz8o=QL4w zMrm_lZ$9)wTyp(Zk~jec5$`6Jx1>hqgHT%?drur$PRd+cECpuR@(X;=+M3CgX(_$u^EilAec;S4^r{k!8Bx&&vY zk^R+e$Abjg{?LaD9|&%Z{Mgb_7^~g~B{h{;3PA{vD0q|Q%eqRlkbdDYu|M&hPL?#y zF7AwdF+HK%^k8r**COidcsMJzyUnAFYxz#^&TYc9Lz_>myN3Nh6Y0wuNHNgVEUw^q zHAV&eRvL;n$_H%DW%wsZHNl`(q=T+3KFT>7f9TL?__*RIKFKMiBZ#`f~qVrfujTmU4|7Luu#7PRupV^H?_%IMBP(z6cA13Yn7^ZmJW-Y%d z_GX2#HQ`y~zLf(8nC3iT_-!rRR5FuB<8=(k+bv2kbtS5WFp?~!JXH7L6Bfbi_gqfE z$Mpa1k-?qTvs3oXW^|fVt9+>#EVDqyOlZCQ4)MsJNSc&4JmI9T=uRPVV`F9T1i;c< z-PAVsv7C3(3qyun*ujk7R1B4zO%JvUhWxS^sh(a~xCxqG3M@NgWsSIXLD0RCOblQr zO!IPfaSF>r!*MU$m5Et0B%ouI>K3o0#H zVR--VFD*u3ZB&6jLzzw5SrTlYlBf@SFeIgY!xI0ySKTRW1W>fooqXlWJW>xQE?VuL zMK9bzA9n+H2Xs(nd!dSS;c5CYrDT{+xOx51KUch$n+g+qBqjI0N!T?x7X7$))A5t7 zTg%bq{C8AJhCRjy4o2awDIsF8wBI?~KfhQRF*_XR`et)xL!4aa(5A6Mt)ivml9?2= zhe)&7=gCTN>YH6msEUKgp3^ecu2yI#_}#PqJBluH=M?)X2kkI;9zW1qFyj-dAvhwD zSf<87QB6R9B^~cW0RR?mvLDBK>(R)&O8S1aNf=A;X|WIJ2r3EsK}@@Sd4d|H+abGHB8+RR%HCAM1^!W<-5K0i0gdK5p=_urUx?6h@uIxM_KIm?wUu zM~0TqB|hKc4Oy+lcHp?a|901ui+D6(&PlpUXs4ORLJ5Hl$s!#ygFEaN0w(RyC@pe+ zd)>^vGgX_Cj-LFzIS+v0=#6V^De(!$H=W=eEzAN$9O*t>Xl;qbWi zecgMZ#&C4!Z0FiF?rG8xW|aQhx%(bBX^yIkHEPKJeA*-lxwsMqoodb2nG^{ezKP52 zc97SC#;pXau*=|_vE6@9vz>4xX@*?efT(Br$udKO8_l9KTD`D0UjKiS0y#p9CiL`G z#c^Lu+>+O_I=i%v;yqR3O(PDRHmTBpJ-ZyaY+Bt!j1~ktRF^6#53?wwAI7gS5sPs5 zjUmr20Pi1FD~L36y`gsF@*gn_6==2LK_D?BQC>7I0cZuvEin>wxn?Cneg%l-&~&}Y z%jM?ROqB~2ZNr|IK_%gKmLl|6BdN;=A!NYVA{|7#rs;?;PZ(*-iNOE_P_h71?4oaP zz~HU03lJCY64C2*K*F`8(fn%K?WL- z+;$x{WzatCRLyCtk0Kb#cY`n`KyG=UKn^99-48R-rq|O7Z{)BiAbhS4zfzR(MZn~D zK!mk>H#Iq|JFg>p`5$n&2us-(h38{1!8JjxT|{;zWVVe!H{J1{{Ob;Tn;d_I>03eH z@|!CRmVSc1ea<(BBx(*S&g*Bi&=+EvRDmiT#2vivri7YmZ$yiQCC%ssL>nS&OyVzh z(P#up$Xt0s8T>?g7EPxC`K7k9kIWd8#j>rFP@N130B3%WH*0$TGEw&!W@pRjcL)Y7 
zB&Cyq@(*~C;mV84#L|)>ef2S4IKO4Kias)vMtA!2QeOcBKbBA%w<8N#leQ_;WY{vW zsy}V1yOaZzGWSEv)RilP+Wp&A$k7Y}(4&}cSch-hs*AE}1w1My;kM$z3{j#ZLokoM{@2PZ%nA&W(?ot#jkbFtHE_KzIAf#LM@&A5r)%Ct zoyD?A_0(ikPc~%rry5rBc+|se%bM*4y3A>W5(lgqXrKuJJckiNzn^%EPW`-uL6cG2 zP)a9Kgc+WBQY4{$DoI$mr)T?+s*HeE32H+)M0cGRWTIBvrFrw$%wu>R$5xwXj?~a@ zP*b2byG04haCiR^dRvX~So_+qql_og{V@;lypA&7_q>?Oe6nEikcx=2`Ae4QvwQ=` z(rds3^w}dI_dCb05Qsb?sSi^M19rmCvrKae#lx^o?;Mw+51uqt+zrj3Bb1s~%Ntx8 zAIHc8#(g7)Z0SzU!Cr>jXSMjqi*xrXwgeg~?^7i@9)8|yX|hs$Am63#!hq?>LSGvD z=$PksyFZf=W_L%h=X+ipAL9hhd|#@ytM|#x%UrI^_bfPy9=^NN>tEb33Oczf<(|)-rbavbnDa%U{Zp*RDwM({&Z#zbaMg$3hv@Mmy!NwW+HbKtKJ0?WKml?Ohk0>eNy&XWmD47wU(cAbho8UECU(0}05xA?(>Az$)ZmRAEu z8&i?6LsK2V?_8=reiS^^Y0oL4-*e5YxHM7+ zC`BCiT`bjQ(Fr7HaiTv%6rJk%O9#TV@?}b5T0r~RDmS-(p792e?n92Ugy*0-eh8kb zRpz8pln?Bm*LPgyy()tmHqFfwCVnzu@87k>fU-ZFr*zf@WPnn&bX!PR#0+<|lYX=4 zuwm6lCjdY^QmR^>Y)l4qfJ^S~u$TNexH~|T5DorzG2Q^5V;=XTqB+W=dBhh&YepIn z#c>Gcb&b@5S33{7R(HHBLt&&XXLp{Pmj<%f(B{Z0xc_}%eMyj5O;0EjwXaX2YI#9t zqFLC{o4_G%&ODjD>PyCIg2V=Pg<<)EAi;4@@${^q5uI_(lw76?-(I)whMO?rUeuqc z?Pj~}Kb9HWHtSL&1>JlREG_r7G8LfLH}p@WdkTE(CE?i%Z9erCuX~ePx-0c>sjRPR zj~O+kDJMS$=GF6#plg!g;dR;+nC9J${KQSE&}VU+9)kbZq3J5!K$NWN0{&owjd>Fg z_}-`tqYqwBnlmxZv~u%qgFxvotXFplg}qIiiMrJan6`W!RI4#Q_*!++UeJ>iFrier zzR;z@L6cQnvk_dVoV7RP7;@vJEH%6`mYhcLJ2w#mO(nZwY43o?uoYcepN>! zbGRpkO@_x218nNY1MRH*(o!$EKozU%0&kT{;xF(9& z$)=NfH;C3nm>Oe<^4N%2qP^tY1?+(#ekt!45M-Qw8Q)#~$j_yP#xHD1U)EID;^aJT zVtuVx+&f%qIdIsWqG9cKvVQvq=c)fha*(o?P&tPTHpc2=Y{^&o z5uECTM6-2Dlp=>uZ<%a2FAr8e7PnseF-o>j4tiOy$e z-SWA_kW4-$;Vsg3&uMVv*_{AncC0dAh#sA6I!UVK0?jk?GD@Ew*0>Fh(AVUfT9OFo}! 
z$8~%DqeSJ;RLvx0er`(J5V&rCW8}sC&=kub-2a`YJB2}b8NEYPlzFlD2511B(y(sv znLqC4#Uj-MNOXV?RI6cM!%EDnN2TEMi-n^L%YRNgfF#I2-@UbguENxc zPHgcM=|f^thqtxk!xgo?7#@cfQ5p~M- zX7#*dtSPEJmpZUoNY_kTF(Al@_A(8pO)~@gDfarDuu5#p;9fWr)J`h3ok0zutax|~ zk^Um@`DT{+PzNX*N3si$L4sWzR6KyC2e-W`)JkSU51U3-^0A{mIn%;S4#*3x50lhj zCFR;P`(h3pkyYkKZOV#Ud}uDnIGTq_6xh_U!hl0+CC&(}$pJ8x(&%U{eg0+oH^Vc< zp5^*#{;hgS^#U{>1d^C<`5Tfe8JF~lZr(%j%(B%C3U(rgB6hI<$~@I8oIkUgDEq_B zGuB)gE)m+M$oIcYoNH4c1Pyo_yz@_0d9;7megdL?=bUXN1G99>s0u|fZ@7>BMX70HmOrHL6FrS5*&i+O1IIiUX4N?A;>~-MOl1hQ zEyQ{Q*s3hW;OC`=tlwy+AEb^w^c!`M?5 zcvs;56IjXA%IMIXp-+$ZMzS+3_C?6KKjNJ%V+jj2j+8$00(ve$Wj?azm=+1VrW$u9 zEsywGZ|FVuHeBw}^fp;^LV5T&b`6xpbQsh1k%YZLM0_wC)d0^07(PER6WkPf1+DE? zGYaY%q>%ST+HRP;T64@BKAfMhfWv<$Q25v=Pu7(IEdc^E=f}w;W?edUZb$`XdY@eW zW-4y@)y+6@E2H56Vb;T|QaT}XvB{Z+-E!aM*hPSF&2o68>C2DwUud2m#gtur?RMfJ z(QuGm`YnC22wx(wx=(|L49$;2r@onEVz`!BH^quz{nr2mSB2GKR+l?A20S0kr}?cYeMJ^!;0J85tM66ehK0zsp8B&I9gg2%K2ptyKFZE<28*nT4$M! zVW)Aqz9NZL(Cm+)NJ#$lD3ItWG|d537S93u<2(*(uxtV2V?`lS?!f>q%}4wD*@Ya_ z_6XQ@2I;Kux8kmS5x*}yT)GrOMnex1E7;+9vGuFJt~t{e&pTzhffF>h#7m)q0!%<| zh*DF1VM#_HZytoKG{WHrF?h`Y(7c2)m_@-GCuZQ!)&Q>Z4**~hwxa{WL_Z_Yf?n;& z-(E<(yn?Zl)D+36q7a6=I1Og4`T&&ZujnsF-d!c(KwrK9n{$2#`goK{qwzKc;z^O% zio$f5yjMeTz{^6jZ zAc?%e{CY~><@?~A``kk6-t$5~VL949jC`)Op=t(r2^mlgNqK_6;5fl9cV~+whb5o_ zP5ba*35SCGvwKZrvoX_;lye0`ZWrB-*Q$sEy&A`O-(>{P?R@flO?JW) z&P-VAz5j%_@POUQWrPa(0Jon1&jm|8QSc^7lo*~oF=?S{g6$Sf(sRI@0@w)4kz5zd zF0uNw)5EVxEQfk?U({wa)wt)&NlIP|$=9|1u_SlY?rrNP4hI@VG*#0=0(#vGV_&AB z7Va-!+1saf@1rY3Q+tC%)GVm8Jd<58E&%AN)|>q+nDyEz^P2E=GfRSW83@bxttPY0 znRyq>5198auyU<}MqkpbsR5pjFMytXg;3M(aDxf!X8@|bTESSfLoh=jb#ov|6;qSt zvA>1s$5yq!B>4(&}zG`{#j?H+7j9rNG(UOaX4EMN8y&3HDEeGB)jB0q$WN>@+_Ak)? 
zuC*rymW%ieO8f2@)FuVcXpd_(I%H#zZk@i|%bRk4%#!#xD4TQ(h@VVsy z`mCuuOlYj0zTL2TFDI&)s`;<~A!i{q6jGz;dJO>!Ps2`mt`WTEy)DL|ni6zodWqPJ;ysYU@AzFY000RUo}1 zNB_2?i-1An|E>oa3gEN8=h)~1zV%lT@9~BiI6zFEk-a_-RX~QnLc#pNH6;Nt4;JAp za;e=-?u`?1vkP0Q1((LjK(r1m)k8(;abAi)Bqmlr+JfcU1yo8T1?n{S zvYbuooybLiu{xP3Wnzn>5wrs!+8d=^)cy*Wu)*!+c5a|Gmw{YjcnS~pl<+PU&gT&z z{uo1Y%eqLoVd3hvj*@JDD6?5Y6T3&so9ew$R?rB=33)@d7l@&W4-fX;Zb#jf;3F(i zeqq##S@N}JK=lMaWJim!88L+DDk15j8q^g3_pC3;E1q?r5Q=pkr$6>y3$2|0^&B?x z_r$zhqpn(FCdPXHTET_{MkVGIoi42Jv~cG$p+2U*+DJZF&!Ax#AgjE}ZL z8$~#(3JotQIqMj}?0}#H+d_I@Zc9R^IFyDIkAKw;3 zV)69%PCTDS?_{#>@CdDr0X0(eeIaNLe@ioTApk<9+BYZ`kxZFmH%-Jzm1A37cDpsG zIptN_&>WO0QY2cBjPc*qUZbiu7SIsql;1mN-g zY;EmI_fNWaI|hqOs`F20RzGJBYhaqU(tJX0!!G;dBNNw;mBDa-crd{Y9&AIm>MNEb zhVP*`SaE`*+#+YO`M0U{aP4|aMFirwx6lk&aL&!QrW?XuKg-(t-pLRN>h>kTxgEt_ zAY#F!-XT7&lfd`v0^ACZCUJ;F(noK4$-WbYaie2-I`4z_>BEVwVx}6Uch>l-WApLA z2rlNAm5!~!j*51Qr4DEC;3@}<+O{i!wl^&tBt}s&h=JuvRejEK+osQtv-P{l|GNs& z6VDJQN%qOqvTM4WKG(D)*OfDxZ!U|^Is+;UE?~x(>uTCwE7Q3-zZt+#Ap$rHf*HiO z*G1yif25E5P{F4*99l3!2;+~c=*4YL>lqmse(RGQQ2RA};&aDcRJI zMAo8Szb0O8MTa^D z@LWz+3_|>8p#z{CeQy_#K?&Xz6&y^7oC`x3M<-XyFm)3yKwxMX3V9Va6OpI=XP)Mw zec%9OBr*2U1Ar?F#muU5qM4b0PBujXy@!zXrn_L;RP_ffbB=EeFSl$YVP_~!7FCu7 zG_22MfAhuJTD7c{5q;Gq=^+v<0_+pOF9NtnSAEhGT124cN zB2_er-Oo_Ts`9(H#}$R7rtOFnNb5T#)QxCTva@M%5orG`F=Kh@40pqe{sM4Jrv7TV>jHxnvw;F}i#LsY7URdw|j8lP6V$4)V!aH5&C#gV)6XRRuRLkj5= zZK~!>bzI*%_lL!nGQh}$R9QdN#>DUkh4{+pax~P`f)UDl#&VZ9hUFxPGF*lCQ8OLI9^S;)Stbpw)j3?;{T8wz9goJ7OIr7_n}aR5QCJt$mNh zd^uJO_7Nmwf9SVCja)mTMTgBN=yZ8NQA0?VY_CVLir-fqr@(}_S)%QQy!LOmoNN6V zrmzydC+*@Yil~?#DD5Uh%qrI#vi+2Ymc)FCk%`x3F(BeOg05IB#Z@~KN#uybJ?a#T zM?HZbOsEt}yVJ+Y5abG%YEcJ8+7=5o0+nnr+>rW%+V^D3LqT1>Lcmm6dF;+7ps3A3 zMPpYyHE@WmNQnTY+ZHyQipnVmQem8h`iaK^b>{>_j}b<y=*aTh%KoC%y0N7cjtOw003woO&+7us?e(5tfr)Dbo_6u zSAgR}!lOWcQrpZBsqID4J&xZE{g+x4nQD|O54`#-SNb{i7oNZeW2aXGM#WLrZ79TDA;EM+!O9_! 
z>>5A3-*|W)puGA%(GT{ci%1pxWAQK)!s3+l?nTBpRLex%m2o}d=<9k zp#^9g4}+}qBBJsU(QF-=FI#xzp0aN^W*s^cb}wVGF-wdd@PmvW=OMrf(z$6>w0Z4q zS27G%_JsA6IU>IgizY+tUOX=<={1CTPET6!;}FsK($!6QQVMmi{Oh)b;FLuxiFhNY zKj4uiz^9a9d31YM%g(eLD$h%`J}k@3nxNmnc6N_NUWjYzW3zle9%s#S_wWJnk~?H{ z&foeU9F$fbd@-GZt!mX40d*>pybW8nBYa|&EcGT~ziwPmkHK#(%;lq8FE^ZYG59Ph z_mVudPxbE{Wvlk#F9#+xhJCi-_H*Lx#0+=Yz-2b+N}%8ZPt~3XHNzDr0GRHO$Cpcz zg$N?aMmUn_`h-p{b&1{Af2U>*{jn8rmtv;WZNCX`5{9gKPPV28eZ7dbh6y134Rv8{ z!9iOON4219-f4!qSZ{N5Z=!tbXQF4_IIU;1k8%%w;vD~NPy;Q*#cS!MVVU~k`^o#K zioEcvJL+3!t`pUVcuxhfF<$a?g8*;(=5rg(U=QCsja-;ioU)|&NZN#v@Ht1Y2j%H% zA+nJujFV*)(cic;%Js+*j&dW(GdA& zWMWGIXOR)=S0>Zs=mpsOoB^s#Kry3A2Dc+*KcwJxO=O+`eX)s1&)NxfDuj3D%o8BX z7R8$EtYP3Q16|te<}=W-njlrzacD4f1g!Qj#`9~)3IyIe?tSK=iq{XV6c{H3U$ovM zl9PA%2r{8n8{kEfsK!hsVF!9{d>k~>AIG~5w=py2L1C4$n4fcGTLDB;$AG_-EI z0Q2I*?OU-rajv7jfW=0=OZdLMCwP4G3eOYNcpb2uOJDG~aT7HMyH(hRHL~rb{>Epg zucY7jbDujgFyRa*mS=9%@HHi$vU_hRFb;~Wu|3{pd?{lekX?}@qi_k){`N6>(5z&I zlT0%i^p@7VcYDjXb~S}HqkdXpz#Wvgj-bgbf&?tp1D*0jmZHUaEwl@N^cO8B8GeM(vIMxmsc#s(#8x9%eMeU#l>f-rKI&mirWqY7k>x zcG%9P_m!L6WOas-*8lX2BGXX}aJC)vaccdGy?=uF+F2GUCf&nc8`k3NfUrYP#hJ+Z z>~BA^1nUHHrq_VC7Fd0sdDH&?GR{OcpD!g|4WdjMCQ&boM@!;3%awoYXnHjh0YHv<|7Cf##+*wf$}j8eS$*$ zy>LF=5p1`Lc)z_ev@$JF8h=tPa=fFeAyv2Coi>{Ft ztrP=qZ^oyVq>cZdZqdCy((^Qo2%n^H`v@6 zLUja1ed3{VG$f;Z69>djo;;i`Cx_HmC1ksp_6`fjg0Kc=qVMrz*MfIU_e<(PWa;!m zg!g$J0EM&dVHr@v`H{)!5svy=)Yrh4DUh^Y)R9!AjYaL2utZ8jRvxM;sN4z{X%Gp3 z>c0p`vvzyFG4cVGB}Z}*39TCxXLTQExLx#qm%1Jm$6P9Zf6ZrxniZ`CQ=TD%oD_jZ8Co3BjPoUG?GUA;t@ zLrbZwWOMt~tuSM9@k-p^7t@oBe|Lhf#zdT)f~N0+!3pYvplaw3?%M2X6KL6Id`6b$ zY8m04CvYiwrz8keF#+Br9#T{i zM~Q0+m^|i~7%VrGq!T$;Y7=E7OX4t%q#kxza(v01q)VZdV+ZgwtcWM!7lNl_)*^@a zid{3oT1iudj5q(I56-9~p0J!)S4=5MszmoG)2Uti)O->e&<@09DFh zbgd0Ee^hHz3x`#6yCO`-h6ShW$09Xq*He0Gk>1Gc$%MRB4vmn|0MzXgQ}ACOD$Ek0 zIHO<^uBN^CU#M{&%223*503$4uAJI2p`%U-Bnruo^xpqX`eIaId|2qmc_97zdgMgO4nGZTUJ9?4(l^> z4WQ-GIc6kAtPMHIj?IbST?pRgrWgd>6niXo6g4wDE`85&|8WoVn2Uld9T5Q+V~yxQ zYuT={N2i+PyPqqR$+*?A6qmZ!=UpoU)$%T%>idp|rhC-~nGT-~*f 
zunwVb6fPLK_AU}8WBae{lAq*^ynsu^U9JI<4k{e()bPX95|U=F>y= zjbFJk!@-bMQelSDerT(@7m?QFk1&y7XH^1~=uA?I>RIJ@SrVXv6}}6b^_$zEAZ2e3 zSZAa|Ids{~01i$5sr2?}|L&J~qq}sigLYKcrbM0n1}2BB#XX z*nX38;EStc|7<`ZhOl5N)vR1jY#wiio1Pnm=Y^RVZ0Un4Mtp`FdSA4@D>kddB~%3u zEX!F_IK7z+yn>5yLxMdAM*};hT~F1hUwJE_m@L=@5YcYlKtf;br#O=uc_?=*L8A}^ zAjU?v2X>gisOd6=1T3Pn_1nh#z*zhL4wzu&=fAX3gJ=q1LnV^01| zUBL%?|6x(WM0U{fPucr4(3CGe}-cuuXW<4bWW5 zuW9OQ#6>%s>mlqNDvBP7E4+js=ZM`raE-f)j=`8mDOG}k2=W#AXo7eOK$l^6Q6m&4 z-~E77402>4VJFVW{nq#kmFFHJ^%%eV_Zlu=9s*Z5QP~ThKVMr;w=SP7*N+s0-pc;t%WoYS^%b|m<|drIE@838WDKE!xY+PXdWTf9w-K7YE8CefqLZH{cIUmCNzikU%OVIjcYAF*Rd(<#-v$m-&+f zS#uC$Csos)9SJ9zk<}t_^|ZilMPe%Q(C3&6!j%RF-Vi`5yjc~4=w*Uk`$A*j62OU} zK|)&9w`mucDEA)(^lr!UKhI!S_5-b4Se^Y0EEP(5!Vvt`Pl z!{XT}9xa+8zol%sBQG#Dm(0W7^E!Qw4&wT;dWqK}RVejTf#xum+0rp>L14*4zQYy= zz8S?64QZG)S7FC18i2ft`psVqx_;Fjj%R&Q=dNJ_Msz%p0AQav9OVm~bV8}8a=CR7 zrARXy+uVi`d*PE?V{5eamC<+(D@c;N`JpIr6iBtKm6!D$6!ILQ+-Ja~&(wVdqn(K9 zcLSR@=t%=lKNS71vn5$mgzJKhcl8EDY01=&7F@~JN+(cgbsXX?<|ZKAv`~x0Wj5t% z%%|rM#uB2WtXeao{dBF5_C=d`Ezx@3vO0?#Gt1!%-I5B{J|nDq5hnoP)(i%+pOUF8 zF045Lhd=4e=&@wb#89Y}+_K8JRw_Ns@Z=dELm7Lip(!GB*wXEJPCukBBSa}kgw0gY zAK5t04a|m5u~Gw?R2Xjc0+_>>3gJrd=jYW$X%XUTMB)(mAw>Fmh_ujB;E27k@&G195EBYVpW=pm!_*R@d;utq^7zrHC z*OTo;37KU8gAnZwt;EDE$gxzTBO+WlfN#jXQS;AayVv zXOv>+8mQ74>@7}Mlkr~+6nT=z`5g8=oJ)SgM;@$K_1$aYp(&?9RdZ;t-fPSFq_|Js z{Ud!*5*H2|+7+F?Lx3gv&Dx8AdP&UNkCW6|3?HoIMoNS*wU_6Tci2Kc!Gqe$E0S)K zy>BzN83-m zzs)FKoX8`zX!CxrL@6!(0=d<2r(RU}M+T3>1q2JTnGp^rD*5~FSa^t&oJ~Ewg^Cs{^9}g`=0Ug;tYVV9OmO_* zV0qw~P1PI18r9D~bI>gBXdgA|@*L7_twDEV5|qKfqbif1#WK^mrxU&g=q zly_+#bX^HXMHUSG6%@D<&P6A!uU|5tJBKXYCuYB_@f2`spkj8fFc9FCrFuOdBuI-J zmzD{!&nbh@BpIqu=)G5y7bnJ%ia_DlsjT$a&IqBH8-gvwmTu6Hs&W<)(m40iA`Fso zoY270y8}>DGp`eu-)LN}ftrAWUD3WzCe1Q$wrQU2FAQ=_V-IHegr9bYUF>p?w=X*tr@^%AK*Sk1)UK$ z!1o5!h8busqf!{#x&v3L#hWl6O*=?>;bPp+49Lp?iu| zZKxcF&Vv->JmaJc?#<5<_(a^EHqb?MDG(3I8W&pMQZa-8Oo(|3eIS;5C0N zssqy7^_8QoC(sEP^{a$ZcHGo@6HEAIw>*q+x+4ZjLAJ~TK@p?qFbR0^V)13R>Tcg0 
z4}#B*wn}(4B!LsZGdx=W**El}_3WsRBEH`_4H-?QOOSKyu81%bppWEWRsHqg;JP;a z9)myQbEyy!JS-dxIltR4v+LClQ-?X!R?~^|>+IR~P!wPG1yGI7)fhD$$VH_Nbh7;Q zDiSQazs>v|@Ew3HafpB_8Tci?sf&IER+bOm*Y1?RYMW|~xLrjq}!C~sX^Sa3K6zf?;tsxFcL znuv2&_lD&yZguaqXYEtUct{8@0PJ#_xwrb zJcwt_sD6r^7eWsS^aXuSxyHqTUTA?bSeb-YGs~QU7uDC>0n9Hm(BCap;K&3ex(MOp z6f8|>IlA4gDuDM}Cajiz4bq&&1xWee{s;3Io@&NAMw?T=U7AC=Pxfo?Ehzza3GP)- z#ini;!`Wa3l?Qiyn=?Ezix-CcKmHXunbLz9?n6rkzCPwf|5gWw3<9dfvZ1TqQF!cv zoYq2%Ooj3{@x5UwL@uh|+6+|PP^JB5HGprSsYD9I71HobWyDVWW>8SBw)&j~sm_~& zq}J9g+&?{z~9F*c^ioIsihhU^WF!&Jtau4 zK@d(EN~;1Mn)N+{thv@Qm@^-RjqVo9f{MC`Lj~$>=FTQgf9TxJ?*@ef;)uNnFZ%XR zkQYHUT^2`2cMqC%C+VJEC<#4@seMvw_M-X?WXNUO!c8M9o6f?JIv9@Oaxkw( zcnag(#xE%#efNCEgk04xvAdo|feN!%M(b)$>_p1O?1-jhaJWWibUHPTKgU zg|Rro9TY`Q&>)JrRWiNDnf~JvqQH<%=u@)*dl@v)UHfu*etuL$MajC(^3@`g48r6k zg9*jHYxD0~zS!tQi1!I4AL)W)Jy10ww`?@AVEb!sCnrui1K`vX7Pj^$Yb7B0ET+fQ=&^s z;PJE;f72+elcexCtU?>P|Wu;Ix0~T;>B9T%=pw(LwmN)2n2pdK47h1TrFnXi!nZU8ueBSV!D4Jd!ZLQFL?aU zUvH@JsB60rDP7s^7=LNMJSj>Dh@fHXN;9u|_t@p$b*EU^doTMENvk6tFE@y)Chvwv;uaY~gf>K%OI{evtO6}tmkWZ(fk zRatCjZaOz4xdy`kEhzo-GIA)Ywf?r(YD#!FKZrR92+kaA+Q5azh3Qvvy@Nkwm#%!% zKh7JrjT{$4lDR%LBjOoqSn+{mU9?Ec!&U}mLKcodlv6Gt#An}Psq>R2PiAVhRK`rK z*lz4>+)X7wTL{H^C`eQ$2ga|?lUnPY~ieqhZGOAl%jjw*TyLbdcQGw=0q(SdHeB#BI)hmF~&FC z^8Tgpl4zR5*%_%YM}4_YC z6iYPzP1zZi1G8U#Jyl(t3)WGHa9VyY9ikh2EH^Bf$OG%q8!|^o!~JrUSxNr%&uXJD zrE`q)*L3Jeg)X464V%s`0q4rwC%ri?DKYzh0ig;RC70&KRlS9nDpS)zB$KZQA8yM! 
z)THgQ1YRYW*kDPP?>8DHGZd_=`;BDgd9*lhXOZg}kgS~0hc7PNIcg2XDI``i;&vZp zzp0iEkX>(+C5gMYDz+6AWOr^dFrCKa?vi2)>}MrDkH5YV%F}F zm-1;9wT-lv@+5XT4x5Ue4Pp;6FW?kniarR zpEvIuMMXKPA;v|ekwMbO(mhXzZ=ZQzsO&b$+PkNu9+K*cwLkP8u?HT|pt`3-_h!yl zU|q>y2zGQX2vvPeJaPLx++TIYxW`7E?ZXx;)NdA zeghdaNVfbL!+pD-TvFbYzk}fGsiUGXO9dK=N@^xTv(6$;_)kh)hQPz9N18PfaNZ8dt3!OC>OCSrDOq1_Mn5qkkBl9~lnXr~^t6H};#*vVyTl(J-_KYJV<@4NH8zZEi zbvK2yjtQPsW~V(0%Z3io!M@2^$mJNZzzGH_WWHxJsC@V0YmMI#ig?9c8qXSadm!XP zyAQ53-x20VQXwz~dj~uQ)uCE;utbz2QBvawyp`CBJ0&wTrSs3Y>tRi+>y6O;_Vk6@ zS045CD+l3UhH13UHejI73J{8yDhS7bQXl-Z#lmBo#HCLRM1%C&(291LHh*h`)@x=Q zY47V$&$$!n_bsm$^WZq$GBPqyI}x= zArP^KXlEP5$J_wUSrzlBdSA-5Y?ec_HvQe3km-#Sf337&Bv=xgZ+8rb-MNYmI$-J7 z#q6FJ)2Ys^F3jegs?8aLd=#ohT{T%vApyJ!KVys)fg;~J^6B!rCrS>{sJ^=`q($Pg zf!oh|`ZUtUquxdSY9CJ@hT^ceJpgChRDCOuEmir#B0qXgf$U*!vH`o6DSea{(%k#b zKZ%li>l%3I)p5;lBa5e+kaIXY_&YI(+N++;45c@{DFZaDI0lx0Q$(%&^MFSb26~s5fqgv!lI|uzj%?(Bu_@N3P_%*EkMUtH@MGx)V>~m~B{rS){;K zs(BqrZf$MmUFyYNi)bOio68(;__?C?4+8t5uq>gZLm1#WBr~qb%(o9CdVJ!;-Oc9PFHZi2+egjNNH4Kz~ zd$dI6F*MkCLV#1$^1sMB^MYL=cktUBHL_4mUKn@#2z1NqBXrv*A7TBXV~jEnK%JiD zV0#Lo$^V;~BM+J-&WmoP#VW<+6Kjj+JBOY3B^j$D)+}_!NyFonr52+^?p_OLi2r1J zTR7~@^t9!>Kn=O3zn4&#x~;hKxCS}*JF%)>0YQ0OPI8r*+SZ<)W-AvB;nrl5tw)_? 
zy3z`8^571;K%fBlXj6&4NQ-KCA)$vP4}I!cO$L|v(wuBmaA}jA%UtqRvDf5qV*;mX zzkq{pBrt6G@gn^7_HUnt(oa?NF|#JXR|}S-#h&0?Pq3;t%jJ)l;EaX9$D^Xztohjs zqrCGlNmOSFwZTd^(W`jwRn^CtBDC=$!AN6azTE=m0IN|)9u>lwYf7LAv?T@O$upd1 zpN)mDSktzLQ{-D?#DNfPh7DsP+==@HzEvN*`+}p9MVdRaSGVJ(*+G~qy!H{W1z%@L z6;Thc`&D7F_oR1OkG1{A}1*8 z?~eKMng(3aN~_@Fu*J4;nL`by zP$K!DmG=&4ZoBLXr^~0Yw|0#(qFJe^S+mq)PT0Mv-}3BGM~i-KKR zgBEFiXgK(Gqgtag>A12k%V+QIF+y8Imf>&V8U5x1dR5aW%(R}^d2kn$?-P5RzH#V# z!o|_fjpgwC1%!Z|qTNLO0H)y^th?0x?x;wA6pw0FDFVE>E)0lqIvqgKE5@}M2C6|<|=DebV0Ui%S z{{yq5A*C&jIfAPrqfJptn6G%^PJDbL_c|r4MGvc4c!EtGz1=Ld?@l05*y>^&@kYkTUk73q*hg1w1L8e8Z#Ll4!bvPsM9*}Ejb}1ZUM~-;K&4m4JbN#4ta|_j&oA6YbFKD2mxvvv`ggD>8ZFQ_O1|E z)UY?SaDm}vIzkgh6s}l0P)3Mv9&sUNax$^E9da%mT19>uX) z2QqLzHpm>(^CPHvkzzYvI%DPzBX&8wGO9uFcO!+CjWKPM$X3l7dB(<8=BEza7&)Lg zb80O)uTae&o_jhJO`v87>6#WPaQ;t6#%I_C2EnV)NejopU>Ln%PRd|rUQeR zgyOA4u4v4^pJ%zVVR@8F(-|u6!0`eG;d!kOjIL=(;#-RdaeP?>DN+F!r*<|jz`wV> z8&hMuC1CO&X8!G%{j2!YLiwc4AKFe}ftTnh<)xs&=*nd!o8|FGR<8JLLB6kBHeH4&1G4d4Gvgbl>8hIl(5CP zwi7t0`OD*pz&!a(=yF;jvR?H^663J>L}vO+blkSJg)7y8C-ucEFZZ>7i#-Xp+OqC?jsZ%yP$@V2diqG z-=Dj9{Dq*2$4NCAx9sR1go(AetO{3uIZ6!=Pdti$W0rJ@PP?rW9&e$nAuDG)oWb|M zwf)GS(M%niwagc%0}qZpZ?k>LJxi&j?&%i*yp5AEFqG7q{SEUel3pB$Ph!CalVF~@HpYI`L{jSy41O8TtQZJLA*Q_0(eA*eB z)@yn0=e!{V&Wf0q-_>Qm3)ATg^i&sn1DVBec&ZDGtQcb2?U7>v93xsUU%duPtpgNy z2e1HdHgCB%^3)g(_2Ds!m52+tgjeD9da-bB(Y$c%)18v{I z(z;0K^Of^}iArwj=?`FJqc0zEJ5#Afy7HeLaFCPfs^IJn8~4^LNLlx_E~56XrgE&ca(VM8&;KtvlBvA<)Ro5Jbp;l-Ka$!j&oUq(us+<2B zufLxm-J->$e0DyIEAziDFv=MXRQ$&ty=xh8+hXqKrX`KsFB{=f*v ze798w{OND@``-MXkSy#ZWzG{{oTF6JtT_09XCqc#uQ*!L*k!nlBv9- zP^-*o6b1SqV!EZ-Ypka2O@TkZc{x00Y$&wn^nUeU^&`lpM*WQQS)I=iog^`fg0oG2 zF^$W9c@C-t_a}{?x6{pVt(_l(s!za(?HeyHv;&%Q40f+DJYqQ zcmOESOeXlU5AP7yFC?N4=5hkfSEZfhfS37hjhug=1Tc>?)35PEmAIQUImBZ-<<}e_ zeH=cQj!&W@Vxr#T93e8d+SE*LtU1Qqe707PvBb{>{gPOOw?Yg`{&)#nEyC~jzpIl1 zDIK0U8u~oPC!ve;07ifDN?L-q9Z=M5yaRqW&{Yo!cX*N5;^oUMY3GORmC3m*?%YiQ^_YVY^vRF6NjAKfz?Tc=ANfMeD|cg199}^h@0? 
z+SR>|aH3m)7WF@;`&mlhdkmdBc@QsTpAwcn0<|Sg2|<8yyN^7@I&HA_jq1VW@HSNk zGtJLP))p|oSO+Yv;fcA4wr3h!2k(b;UNAqQjHqVu>ZbS0)&k8U2qHqgW zQ%#CyO2)y=olU+gVy2x;g0$3pf?6am46GQ5Fh8&+-)l4sc%rkbIk2?3HHKVH$8abw z2^QpIjV#+Yqpb^4-lbambU>DOSXcIWNy!-)0 z8~6csfdr`%)IFw5&=e9>s=o|}eV`x8?LpFkS z%_PW0wzUU%@(VOl%Tu zdf8)$#b&Xnv~l%_9JJa@d?A0Pn;|FFI-|m+Di(glKl~45-fofvXAkeU?s=UhVjc(X z;O~nVg#)rhPLRGcj6s$nE7ROjm1{s2WIV+ARX!bY0q9}RVc8Vmj-LNX+z`E&$e`0D zbJtm41>W+pw!S9KeqX^ni4U}3|Ola$?zp{ML%I0~pF z(sKA_Yi!baG0@AefrJT=7F>$>Nh4f0@W#_Z@-ampx<=V}mK-)CW`_KNx7`YgKEt0q zH$`n7jI)#VWAKELYus&hd*=C7OAF=V$aCIwzQ80NYyO5iU!xogIYYnnm2)@ZHm0M? zmO{oEE{h9!N6S!E!jzs*B23BSRAZkj1F4jS!0$=6; zmdyRBU9r|LwZ*X9E`QZ$;TD5QIkuz;;T=x9g>Rt2(&KUx`+O_XQPB&7a4xv_`dpu+Pl5Ee!3SjC}UgEK!|x0ye_@5@v`xH zLe9@2B4KeONmErm6JUF5P!gumC&em4z5XmSjk)CrTTg(7`Z-cS+xmUnGcs8h(Zk?b z>gLduqH%cbug_InNbRJX0@EIfUXK*MdEU=p%{Baaob`0nIj^5@aajB)cJ_h(m@s^f z#K<*z)B{s(3>U?Ys%F7TaEBuHw^-SK3{6xrhU!u}77u8}3PAmCg^Yc282<3k?uQD0 zr>H;)%u)CwRZmi+9Ze>X$D+S-{Q z$*Y^80a?dqlN;D(qHDWVWSF`$Hf~OLDPea{V974*k#Di8iDAC7+;{*mXsHELLf}3# zH3d2g;jMeTCe4KCh!5S?4ng>ivuq>WL;J1ui4eo`NLL*1ydfCVbpBtV9b;D+x3Kn6 z1p0heSp8G<^+6a(`v4upt~L}al~BN95Cgr}nWCCXD#yS&%SqjUXU`Ex0F9~F*>)TE zg3{5NG}91C3O%QrrpWsP`}L_6SW{w3DL%H%9MSl@P+F#fv{XBtQ=s(}8g2#OqvYOdn{oyT2Fgw6c!kC@MK;r?Fx`bF0227kC5Aml2KR&K|c zh4d6UO9A(^^VVM*fLwPtghBBe8Fg!_R3QSl+vvL0W@1=}{dYzDM5$a+0uyi-T`oT6 zUdE#-9aRmh)+;~iUm7^qIUedEjAI49Ss`bg-nn#;TPl~Og6rsiJm$+tzKKuVD#*mW zk``Pn0zXzKh$y}l)3al(JtFz5%lGs{(6rH#abGx4eheCD`37z=O{W86kbEGh&{WPp z#-E*foV>D)r9p~TA&O{u@V53Z;a7vR(fK)J1fax%CaY?8lgzSv>Z>;;g%tudax(PO zN4|M6p|SW}y&R^XS+6RnA*Nt2_84KM6Y;NsD zc{3`7s<5#Lku7`Vvv`Y>TC+;df0G6sGJsz+R_Ga3m z)Y6|Q-3Y0$?E*8;h5sEEIw^fByX+I(>86#8k=VadQB=KWC<0pE6&gMfn(Q?b^{=Aq z!~V{lqVr1kk-?ve{Vq`ol2M|Xn0$gG0x;G?qq{15uYIspHroto?3TQ?RgGjF};>w$2tB5rLLzz>dxp6w2huD|`07!XF4A5u_)e z>Pst8yVlkx0GY$)IG(f@!;)OOkE=bNae;PtYd%rjberkIDH%RplSZ{u! 
zNb%iH^%F_!>9*{p<)Byz9%4j!X~-RS+#PjYfo$hIbLfDCpgW977ZN1n)|RWw*88~b zPcep2^DwFQsds!K*8FL!vIBSWMEV9&>|HSKapB`2dpeRF`cX8M=$n*P<|``#{Ve@Z zyv3f#fnCNOBcFMW`O@bi${+;}{By4NH*b?7MyHw&!QiI9H=}sK?9bQ^>VFam1Uk#DgSMDA>+kL5`}24CmoYep z;^ucGa2D-EBS&*Xq2sFUR3jkgVVv1OeB|7SbRwUS)=jOA#q$X#)ou2xYFgdm#-wYO zA&8g?iKUn;!|E8-SC31Pm6u{s0o0bvK|hxK;w(c~-Ji@V3e}H;-@r7uS^hdQgfpN- zryvb%xLTn*rL0_`#%~S)q%!TGz>!qv8&B8YRv-(YdgHd``#zFvQV50&S8c!o-K;hA zptQjzmx;1i`ZkQZqC?IY>8}bYoH`ja#wWL-xNS)P9?w|^c)uNPRWQCZ!di>T7PRS< zgUbh(X%J*wj)R|DA83EaY17fV_u~GvC5(9Z0SdT@G2M3|HA_jHkIF?JOLGqoXS z$s}xwPHPt9{d(%aFE#XACY$&=IS(DE2HRoE-L0^$)&)8dRnH6o_Gj?j4$xWo0pA~7 zVaWSxVa_!3h!Goq!-#)u8w~ER!Bb$Yk(~GwH@Gs9^=npj!j$%+DA0*HCpaV52qJY( z1|6x?YnkM)I;#_?u=zb|q&V<(FG3aU?xBfu2B=S6;hxDzjD4z_oD&9Z{)U=}dLn>a z+LlR~P@@>I)x8{;q+++xn^%;A*||Y&PBeENe4x8r3wX6VjU$~roIg$>& z`MO>Ni$QKxkY=~Czsts_!Et>%#hTP@%qi;p zGMMMa^5?=eFwa;rJlK0vr+E%8RB+jV8tBFnfFb#$1m2vWH!FSCz<_B)-v}MmaR#AO zp^RS7?*kuN23}S1Wz$hUwIWg50l*kB3u#_L%AiG`j}u%SYBG&3Z%ND_8jMv&oPn+! zQLUy|)s>kC#nYh@uhm~yPE>-F4*tJGe{^VR+On%(1Z`WgS@2>?q)nurL4ba$fb#=E zKnf9}T$za6()4D5e&y#m((wMy3Q9bx6yjR>xy;|dE@04=ygWFyO#MY@<9* zBTAaa8{&M?fd#6tCBkE=iiykCC>WSNZCqzuDNvs)<{e8n{&wbNSt5FLsxkXpf%8i# zuMfEVQ8|@OAe33(PTKKE53{^{=tZJREzpwhJu`)6Ib?VX?f{6j($|cNB$p z&mTdrs#k!0ca;E(d+T`6KfLw2up%SAAm7z>r~oxP2>b7k=Ub2up0pR$B*$-On$_$% z#-MM>{dN@i#m(e39)0LwvOIdW+0^oS!)CdzIg~gLNecx?>vPpXJrfITFZ=d9at^wtBCH*rr*{wwUD&XYevumeumQ0pnEx& z7bT_8mq=S~9~3ip($eZ{O`v+;*!*-s$k)zAx}AJIq5JN*Gl1JlO|_)2qb4Z@!g%!{ zM^7B?U}4x)5F=Yu0}T~V-_?uBj0L^H#AA1Qy)myH5Pp;BZ#s`%y0F%Apbi|@Hm?b> z=z8CqOC`F_J9uH_BD|ZlqHWDkD4J{6RT!wLaCOOz~}LwG_QjEf#W(&MP9=Je2c8e2M&lKjT~$_pwLF6-v<&j zG4H|~fnnKtJC9mtUmLXLZ_0Q!g!RlW4d8xLl*vm#1U`OTA3~dZ`oRX(bHN=LfBmNZ zj{$(G1iVbN`X3)1%jtoi+VXbl-byDQAwWZy%Pwodj@Y1o%m%?U)tKSz2JSULeAMCH zhZCahqfUJt#*`VS?}Lmu<*a-c?F_U$Pw=X2>{9~y)@ zxGL!sdU-DGgH*-c>#TunK5^EHy`BFut@lF%Ly|H7Q!>#5<$Vpt3!^umCi2{-1Pe24 z;f*(S2R8CbV6475tR4a($yJU6$@nCBC(GQCntC;(5^|f~PVQ zcq?H8Y{^R&<;XTo3qg}K7x4GfSy6wAE&Ib?TKC5~ZFvD-a=|OyI*6Hz4>xE7Wd$SI 
zIZWOUPz-|-G3-jufl^y3?ITJq7aH)RUB70%E`z zxg0HA*NTe?F2ySZiwmg`otlnt@#1N+BZyf)Ff(}YU2UPsND{;lX+Qv4pq>S%f)61je!q`ks+(n;6iAnZW&D622g(p6OZ<#4-KQu89w-p{LWl{ZN-XwwZqnS=jBcP&ng@`&wF1%oUgccW%ezBMUm?>f zyfu+)!IPmD4V3bVbE}De{9vTsb0i5UTDUKu$AWNfCflMpU2_Q9XOlY}!xXwZ4h|62BbNNn zjAP)&x6K^Ti3-gGrax@OufHSWnO^reO>IW*BBPk~C72pqae*^TjVXymsA5E7VpUu6 z1t)~va4`BH^TIFD;cdx2PM!Z1LXgvKDPaJ%nd5cC4Md|qfZ zb%kspZyegAJ3u*$mQ(V=WlB>ZANFx&1+@c$3`GPYBbPWdr~D|m)F-Ik0*MfL_N%Gi zZ(L<5p|rUl_+^L2qtY8i+*j!?dB_0C{aNs^{6aM)znvAm<`s#-=VfZVVC@2$`B~Pp zsUhd{0SI~ylJOh~XL=VaUC?<~%ZW*y8jgN9!QcMhs0{KL8y=Fn7z)VnHPMji>6p`d zIXXvpV^okZk5D@fcnA=mb32TamNq{RfGehs@9#u{aoSK;I>aVPB83|hq^g-Hy~s7D z>EoyX+?RrGjfV1kgvtoo5VF*51)`8?)2orAA!@0#&|ino-#E21k08xmMk$@w1P zpn<1ovqrQnBLH#Md~WmM5=l5qDV*VsZk39>(3h4BZTzqRVrstr!Jm~WKN9qN%q$nt z7rLEHbX-4Q1rb_tN|=o*;XT#|`*=}?a}+k&hJHmO^Nosnqj9h_FnND6()tNy2w=%t z0e!io_K0?4z1CSgU{%Ex?Qy&h~A~zb@=B(m{a%RIOUKlbri_hBLe+g#Z8m z07m@|?eWhry_gH$20Q})3e?`nen^55;QC|uY_3k?a{vK}!wcB9Ed;fM_qOTZ90C9U I000VETEkr}8UO$Q literal 0 HcmV?d00001 diff --git a/ansible/01_old/roles/dsk_bot.datasaker/files/libpq-devel-13.5-1.el8.x86_64.rpm b/ansible/01_old/roles/dsk_bot.datasaker/files/libpq-devel-13.5-1.el8.x86_64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..7d02cdc6a395d0b20501ca63840b281fb1fbd394 GIT binary patch literal 99352 zcmeFZ2|Siv*C>7)Gmj}`xDlD%^N`3aW9DRt>z>T>oXRXD%24J)D49y3BBGFH6e1#N zP!tU^o_$Bp^u6bM-tV06{D1%RJJ0nz``*{udtH04z4lsb@4eR6$BQ$=Ofc{;czcli z15_x~P^z~I60M3=L8?-{3G}Le`(c2w{1rE?`Yppo89NNNc@n&9g0u;|_k)}dbj-C5 zgI^eSlN#uatHhPd+`$i!YHF@a=b#m1O8*=4s=vBIWj%um#%+PG?e zea(16&w-d+SoLx17h^>SS+;vRohGo-*N-9nm}>b#R6GWDV%g$b9*)ZyEh+p>T%)Td zy){x+C*q0Wmt3|JQrz4jpAOFprn9BS;cK0znI)3uy%^;6CDfHJ?8&yN3_!VGRf>=z z!d$E$Hs`hJeN&R3!j|4rzxVY8vLH;IC1goVHCj^;uRPJe^iGMaD2><|AXt8hXfR`)7)?4Z8n>-$e(aM$3XA5`EI88q|rPrSJ z>C{-1TiV>og%8ILWZV^SS` z8J8#K?D07(w%cd?C8EZ^T0&;AmDRF!KqpSZHSB`H$F;8-9w)Q|me_o7s2Cf3gL;tf z{hZVvgvN&kGOVia-C*t`xpU*+kcj8Rn=(shaqmR(R*SxsZ>~33Qph2AHs2MGs`7in 
zDRi^pd$<3sjIyt%=I#c@D8WWu)h<$S?(tEN{VWY^k}|K<-LvKELn0A0Jb?@$hzJxK z2@#Q41cF4SAdmz+5>G=CNN56%01=2V5`@H3&{!-AA`>wP2ur1)Xm|t-LBin3Xgn58 zLtsb{k%YvOs3ZsrVKD>(l8VC;kYqfbOe8`;7-0;)h<_!A(aU_Wlf3)=pI|A}?BLoD zXIDooc=ENBpDj$hH*C!@J{>Hkf5qn5HM3sp*yB&O9!X#^=w5ZoDz)8a$FGh*3sN}Ibv~i<_|`q&Ogd@&eR2k7SJ|ggKbT&Df4FmEhcuS+M!qp~#)sY9w^s3WnvT);UK$ygY*}sPzR2un z*e>7g%Z2n)+BUzczJ>LL^%e`g`&cLR*3Sz)@`ok&e~ZL5*4TZBUB&aLo7Al}q_rFk zt0|iMegk)T=#-91kyhuOp*qh?<&RqAM~nG7%&rGi>^VJyREkZ$aB#8aw7*S+d*$lK zmTMo{KTvs$u0)^7-=jI2rM;2XV{(1i4z&{-LhXem`;2#eB!-0P|@DB(6;lMu}_=f}kaNr*f z{KJ9&=Q;4x$GWz*<_|ale4sGcAN;VpVKCVyPy}+iA5``{$muu~kk~+?KfnB}zz+*! z+<@PKoDTDBz;wSW9Twbx>3&z>3(j2 z&-HQx<^?&Z=T6#y>2m^L9)}HhGspoAyviU4<7XBEIryG;&qn=LkOO?)PwVB(!s|I7 zoxh-MX8OEHOQX0pH(%jX)0C6A9jc?bqu?+SYUC{UE2;Ki+`pd;sl< zEP$NezT-wceO~D>T~0tfeON#Ha|Sv1UiAD1d|(|Gn+EwdfJy>67_a18kkh}Xtiw|D zanoVy2CT6G(>7qO4cL1FrjMJBPag;9Uy4o#eY_#-urw(935S9l&?8+1a(etBY`tD) zWTQTO9o` zCwqI)k%ImFyn|pi?o{~C7UBL7*$Z-`!u*!NAS^6Q)qj0Hf&#o% z{Q}+8z^wX)gu?)(E;zcLgXgOIdd^l3gE;~@WIPOm9S#7ZFz{0hV9dZyqJYdXi>>3) ze=h(zp+CVu$3=mz@7@l+TLZc~0Ys{Gz3%6)IQXU8ttC*G2vQfo2YM=+5A-q;q!S={ zfbuesVnNb|!Ni}yU|S*Zo(hABSpi(|{g#U{AGJTmocrs4+CNMGTthD_UMpVvEAD@U z|3K$|+bfWRBR^Ae;R5t4RhWcrl>f>-e%AdFm;U|>|F8Kkyx;S~puPXSghbPNcl}Mw4A0qglh@et1Xc!EMC!o-1 zGzLi}kP&1e4U0!0DFh^hr;#BdnF8WiL^1(^r4cb01QClRB9SB%4WfW1&{!H3L5AU| zBs>y{CR3nGztwzC16o#5W+)|sb~s;K)|BWSR?_7#1UzrOAG=0ppX;_5d`MQ zBoY`t4TeA?QDh2|Mgk+mQE4bD5<^5Ih-fm3M#MvS1OkfyJrZ#Q3=#)HBmxD2M_>sA z5{U|egfs*S22rpm3<-(CAP8W%Bs`8lA)qigB#nYaqDe>uo($m#SOi3*Vj%*SfT3Xs zIM5B5LZV_wI6MIcVF^S64uuBH#}hGt6DS0Mf<{18Fn-W86@?+9APNSek&$#lKyL&* z1@M}TK_bD#Q(-iS1Okg>Bo2^-Mu9mc5^+>2j!cBW6jHEgDiw{!;c+0MiNZh#G>Sx_ zA^=k<2n^sXkPH|Sg(V{}Xgm&qCjcsGBs7LhAR-_F8b<|!Kt^H#L0AyFB#|%_1Q~~; zpdb+Jq|(TEFu61wP(3n&MkP>jBp_ONB9ek6;xJTDM5a;EM9?xsC7|(CA_@o-0!u`Z zAq*LdK@!mzDjBeZL?FXpL>vxHL*WPrJRXh1045NDoL~`nAkBCnPY4tdg9mH}0tZG0 zg0et}h*$_PorFe%Fe?Uzr{QrZFpo$wkas)|0|X92)38(m1y7|xcqHIEiGV-=J)l5H z1PMVPBQZDxnheC0h{7Tv7@mN}fG8~q3t{kB1dWWNp#j|#G!lcM0=c3g5a2r$5(qI0 
zfd*2Ar+_FeAQDF)V9*F09tP$RiA9od5Q<2qlF$&DM1~MF8VN&1Qn45e0Vo#%0f81k zgBThSXaEpPA_YtoAOTCJ!LT?4m}UeSjl~1fuaD*KqC@qK+EueyI^jxNboHY zbOv-8B12eEM?wNYhJh(2QK={z3WKK0E)hcoG6k58r(sEe`BW5%05pw6L*S4U3`R07xqeM}#L(vF0GzO?7 zpa~2OO`)N{3{YTr1Rh7h10#e4h7T|i=nR5FLeMo8i^fq%5FV&Mg#Zi`M8*O=1~!jG z111wg##1nOGyz6|fSID}ClCZ81&1UckiZgwAw$5(kx^74gu)Z45D+Ugg>F88?Zgsj zbZZHO6o(+eATR}ZG?9iPqL4VC<}@;z0?Y@X5J>=qbb$gk8%H-A7%G@aED#tZ4HzR} z*pNsJ6$W$%O{7a08B7kA2#gnr2yiGsZh(md+y?Rk0f|HdX$68nC6XZ^=@f7gfecZ| zcr=U(>>d^(f~f>!Cj*@VN`j+dfWQHYuoxN*59WbJBa=Y4L@eMR22F)fKxS|_8Xib8 z41>oLAS4dFLlh*6K&H|VR0Nic2HP~iGYSM03K(Tz%YccaP$&?A24RR0nT#e9XjCi~ z4}-|STmS_mk%1+}fRPY@q7ukt9DxYoh!`4yh{B`MVD-VFvA{gga9}mV6R;SdwM4qK zz_1XK2G%T~=u|R-NVk%}03yf;BniR*RU`q4KmslRQNn=h8br`EjS6&$O2SdF7@*!T zU?YJ+qyaksTE>Af;xSlYQo*7Jwwl0R02>2g5a7xN1YyDT4+I+MF_1&%4X0pfKr(S892w{-1!x6OZx{tw zVJes?ECyIuU^P&96kR9~NF1=7Xc`_Y2Ng|&^De%Lj&~&L{kYc1crwxk9!P~K^a{lPyy6$teN)qwSM=FXu?(C>myh$iHHt|BM9= zuQ+g8`70LcCzdx9Or?OPz-A2Wng5Iw;vWe4s~H&VK@Ncey(1L<)bW386A0|zVxZSC=q_F0N)2`g6w$dtfd@tSko>&iz~LF> z;pa@y`0D4PCz8*gv%fHx4Z|t{TH--X70vQLmA^sFFLqDe<2qxU08u%LxzJ9?# z3P2xp=V}me$NmfND#ec+q=rPO0f#e~54DXRRgef3tg3skk2mN+TgOQ4Kd=#$ud|rm zGu<6dmy@92-^tu)3E+ABc{Iu!9&P$u07tjK2e7AbQUv_>)mfG97Uod-%Ma8FZcb1b3Eu@FZf^?r^gQduSkr5P7r^i$2fle-3f?=(Bmrf zy~)owBnQapF*q)e=ss(DT#FuKqi^W>L88a01VP#i5ZzbpHP0@vnD!PRIXm=lNfbeCr246%eEH4GvOu-vDXU-(Tsa zzkQ^#9tink(I4MksNuo?0)<}uCo~_(Hw5zjQxORJ(E9;#7ue6}lrQ8BBZH!!yT=W( zKm{Es00f47=r4YOKjDpywaZVc{R61s{-h8Z4Fs9!C_kfXK^qjT$C*?-C@_csLjeyy zjf5wYAczW_4OlerGlQH0{_2E8Mgb2Ba2}IL>s^Cg>c)#d`48LxlHmh{>enfn{%2kP z55T`2J=PB)9=>Gn5DHc8&(P0#`%Br+>HU{wzoz^@ul!@1;C%y`{`T9%{>w6m@Ov>1 z^QZUUj%gdEKclH?-hO1ro2mvDCsSP)Gkt4oZ9{$Qe}w^t{WloYziopS_;+o%|7|ZG z!GG6=C-iq1UVeXv;qU#o7{M+&R)2>S81i=*L7{(#5&XAI3-S89HbOoA4&$J^i>~>f zkyQ4g;3{r#6@L$ZDqLkBeeqY>M-2@03)BD`B_AkQC5TE63G@h#0OkJ5Dtaykd#!AY z4EDQN@3pnk)z<)@ub2Nk+S=Q@Xj@zB1B|`q)^HUXHJnWK4+fjXAOL`E5!p)x#O;GY zB;7Ai1?+eIz=lHw+!rAG1yWSp$Yhluu(u<-gIfVA^nFW^#!uy>s{}X+gAElukiLj#{K>)1Xx3|z%0IK-E#;Rf0W^dJNJpr8m}vOA#3FC<7M 
z#5c^tm!c93k?40Eei2XhhJxIG4FL=bRFG+ZfMH~R4?kG=K^STy#;&qXv&zr7yNa7H z{gMZG=xgc!Ef0O&rqlZ?(bswUJAF<6RrYIr|AqT2|5g9ToL)yyEE|cwC;0XLzm)5n z8o+TF72qI(n_BDqx#oy>;2Yt3d(RwgfHad0ZgNJ1RTtd z7nPz6HqqcJ3Gl;!U$`2OJ78bEDI2r5-V)uzgHrx^F$H*3)J*9YD&S~UaDY$-rxtK# zGSYJdW*6zK0TKSaHCY)BG{M8)AKcuafDJrdHzHuV@F2gCKr&Ssu5GPrWCRZen~r~3 zsJwng11A|U3c5alp@5y@AHMqCdH3_i4q*5H@rS*1x{%`dGE~&7C z#0(N6NGu>Rfy4$914!H;ZL(OeqxZuW5=>Jez-ZvQ0tG~#Ko|vw0f8YJh|GXkIgLmL z!7FfufQ-k|KoFCH10fM`zu{-lo`eRmOb{g}(`ex44ZSUpc7wEOZ4LbKFF32L;1Ak^ zi*Fp`OI*c_FewHBP$<)MRQVO*)$FP>OqlxxGYR!0gfqJ2c7%$DbwGy4TVL5zWZb@Q z2aJ}lKCVSd9>~7tC-x;d=8$88*YZG)fLfWm^~)<>8P`L|FPIFd4r4^4FYX1eUuX92 znw)bk^T+4O$?<(&8lB1>w!wA1-kWyctZ%ZXRBEeAdf&(#?Ns5D%_7}}4lb=`=NvAa zdv#)_JzYQTptMx~zWoh+FcIQ8`!sB7p?sT3rs7deg%G}B@8S>Bp&bo`U^l#!wdX+C?LeU`KIVfJUqf%;0nOz|X%6?|u$+wJyyG2?Qdl-BGo zM`dC1`Uian%f>1fDt@F|5!J*bQukX{YE>)m^CFr^X5Ti2Pdf=2et(6z?ATDjJyvOU z=K5)-&dc2@Q;xfp##%Dc^|!xjPetqHXmCHh_xRLt38U@Ob?`X(`}1E&@4jC?G_qx& z1je1)dn7O+-@N~TD&un5jS$h%7rue`I;I;;{f7+X1Jf$P)v$JuMDtOXyB7R?=$!$Z z+;L4$xi7bylziRmST#M=a?!i2W~YA`2OrC*FRpW-;dxW5f=cdI^PQ%JUCMmd20w5z zMK_E{f!hX26yu2k&M@uigSORsmC>pczi-J_Xg=2F zCq^mSe=a($wKS+%Ls^&{@Z^HG!DyTGF3lZ>m@+?{nq>u zW(sq@df(KINs};7zI_VJiMKk~ZchmMoA13)d=MRQ?!utu-kDv(3_^}oPm|Pcifa!a zI*JqCkrm^arAFlSa3%*e7j;T&_a)3ZZ`K;+5S4mUT^{4sof78qvAvP2FJ3#TQ1vTm zQxy8vR}!}da`KYaU71|PXO&AUulA;6l&4L3_QfWPjK?+@yq_!1pwzgEA1=YFF5Sr? 
z-;n-X>-bdwF6>cgx(~-k`HG%*9eU|XahWnKbuM{_}p13q+J)~0F^9A=s zx1|}!LM$O_p0}>6q121Z3XaG4f}awXHJ757NCbJB>dinwL@E zz%5#xwh^P&@!wMd3X(`j%Chd ztD72Zy?g(f385jb{bZ|EYsSKan863{m9q1nq;u6i1?pX^%ab~2R+6y$n_YC{BAb)= z__=Ypu^I77BcJw=Hxn=j@tEAY$U9=wh0fCsIcvbIcHzF*`D;FIEIYSVIk2Wj!5w2X z+)b#(GsTg)(w|k~nNxdIjBF~r3gGwT6 zV&(b*wqKaTl#dj$p*$`y%Dh_KQWKPuM#De)G29|3^2&7UV$afPXyKNMIPboMp#t-M zUZa%YiJZBc9Bt*JH{68o>(ekAHj{DUH{$#p+&OTuX%XPBlJ&?)6WA23Ij*~bh^p!t8XI7Gf6G7zvU~ zx+31Dn7>WHc(eOnGS7|p#oN`}n2rmYZ#Ll@i|1}?y>9SgdjDbJ$zI$7H%n>UUe-6S zJCJcw7sJMj<%X@FDm;{C5J8@w_@sG#Vrr9=hXhNpD|YM0OzHOq+7SiM5l9dHf#^1g z;oTNBq^{@Q@N9IZ6W_gi2g)N%?fQm#O{3T%RaafiQm9d4mLYOJr9LT~Eut2S4u?~> zOulKTo7tw_r*Qg5^t@*~LuvNJP2u9GBBy-56P?_tlO9=nLZg^%ZfezZ3=4xv63P*^ zY0bf<={((4Kr^sr2xIY$Ejnnf>UFR=JndP|U};Fm&Swc#EnA$Ww>;1aucDA{TR+&= zR31Wj%BS>Z*OvJg6I1qIpL;S--5zD%syiHRh%{f)k2uM#cac2xeK{p?|8BoNIk`GTf(EDoq2S@Rr@7Q0Ev-FSSbR6KXk5uw4A8 z^iAtAEdzb*`9!0no%}mytB&&DX}e)0{qpX7^QWc2n~%>{U#olaX377}&BE~JDYegm zyj?w)ZeKXVcce9cfN=o38V|qY|AyQvHl2`d>2d93Cg17(6-_Uzj`l``*-mM=n~J_s zmgGR6OdbE#7$NUi+R?lO6USz8Mw=Z_`&iHz-s= zjGIppVy`a_yD|TjFB7Ti@)jqnEWv$$q(o97p!;josmpgd2OCq4EF70&SMosDd8)t4 z!n0vrY2UAX+xr2@Yn?Le8IP{{dIOD&vh|7cuNV07wbAW+4U3!qBKqd$e#vo_-si`+ z$Kx+T6P>q{k{O@57Z=@}z%KT4;i&!G(vxHjB?qsn0>%e@_qN7Ur$z10e%Qy6cq`T*XIeGF{GgEuJvtQb8N1{%w|x z12YbrrNb?2RP{9Eul2m8%_++lO>K(Y^wvt;BChDBF*Fgl5?2||tj4RvoQYw*t#D}f zqqm1ykpaC^A{?zlpAdGMhd!)Xu>(D3f2@6jc_FX?j}yzx(oaO~ z{i>!m_?C6gLPUXmvJv#rv~noQ?qIG@hnr;T5nVN{sn%`vE-k?)ZyahUVB4YT@TzA* z>S>h5XNrBujkHt8zI@eC-}ZLX(K9JG%gaJecBCs61m1Ye+Nmft7a;k1(-%eR-f{eG zee$V`T4qb%o-Ro~!oE(Oh>WDw*H}hZ-iX=nnz}#vPL$cfvIX@A`2xi`GI870nfQv< z&OCT(m{rQZO}Eq{@64yL-Hi%S{$?YyqRZ&qmI1Ub0}8k=*Nee&!o2VnqC|GV&U$n{NYsihk{@(Ic$mSi%OZ8c4lu4yf*pu z=aP8#rK&Iv7A@DTeTkxH#4q|J-u3Id>*{*{onYjMi@Anux8Ad7J+tc0zKVOXDfP#A zwzIh#4;0Or2>24)`@bN+-c{w1tKsZo_x!L%{Zdq*5L;n@Xy7ZA_D3!4?laTl8J?pG z+_$@>co5(JSxHic1=DBY;q@|Z^lp3dssIsTA}13aJiYaFu8;gFy-dsV-lg`WNOm(D zg*~ISqGgp<6<4h;h)YKW*QNyR`;xWfV$64~Ec3vdBd>(Iu8hAFl)m*N=#uh`SGQE> 
z6-`;AXA-Fk-(23ZuI)N-_JI4a$j3&ulAX&6$F)sUyI;2&hQ9vRF=I~B-jvGDcO!De zBKRfS#Mw_v`%epGrdB)j$@JOy3>HBG+5QTV35j{8j!{SCt74w83lB}CU768Ni&4zt zeD)zL|Ks!+>{QBCb-Zu#{JqAr&+gXLG)=aZoz1Q>50c^1*q>9S8c%uewx!+uN85AJ z_h#J&Z-kmYiCLbv@^~mv<|p%T@7D{9m@Rs5i@I)Il_cR!Su;%b<#){QTWRj(amuy1 z_4fY5+g`~@{<11Djych{pEPfs4DPYE60;c9xUDD`b4z3AV59$s3Es-jg|#=*95|0! zTCW`Q-BCcaxxCydFR77(6cC*Ckhv4c^mL5K5#&UsDaxTDMYES%L<8286th zcb)aS92|1+{qp|V7iE^sOEM#y!cG-&8uM=%d0X$r$b~hO(IdU@Cof?4M;sVyWh&*% zx}iI4<$ek77?r5er25I9rMgm?xEsE*GdD&pz31o$BaR(fJ1W_?wIBuN+Tr>pk`pz{ z1B}L#A?yKGUg1etbG&Z!xiBkH*6iv&%yJzslFDTbdFt1vmWeX9nbhG8?aWa0u!UO0rNwd#@jOlw8{u5~}UB^WaFFe)z%LL%C`9FMTYZ zj5Ew``hnXCiZDY>(r@M#={LoB$i~Jbm~m`opt~Z$n?!gaE!`JgqQdUKLr&VfwM&Zmpfn{y z4{?7NTG6iet|qBa?7`ycqpkdHZ zNi=&ne{8kdy5(W4l&P(1EBlZq!_N4nhbYg-+q=$n6!ex*9*i&av=|DN%kWQR33Z=| z=*1)(sTB^IUwx>c{_&1RevFOB8b-?Hy8a0t;fB@sDJ{d4;e1{B>QR0N`Nre=C=;SFXP+!$Y{T?bwlS1sh8P!)mAb z*)IJC$^N$8YW|Ck3%pK;nRZOi`g=9hrEN<69JDIzFV^?q#LNL%rC}|mrGh&&1-{81 z71v`A3x;lHjMhePlF2^i5Xr|B<$UF^@O}sOZ;jYJuJbae6V7j)67n-6W>0&mP(A&J zwu@Udw8>qL(@K~Bn$viei4DG@6>=^%8=*D(W4?Mh__p?FA#=yL%VrOnb5`xwc#gwL z6%o^|VR+ln*W=Lh47`%jp<#FLoakG-DERINq8?_h@yz3L58s>jO3Y_0YwbD{xicnn zIkMHQ^tw*K+9zqBc|{!gSnj)g)217eZj-N%+&`)PIb$T#J+QEheQ!`Ur?7}e(q;w| zHU;y~xk~1l;|Hy^mb+O(FV4J-v5Jdt?fd4xlM?sJYG|ngz4UHOX8)m!H|GxV$b0a2 z&Zgcmb`cS~(%NfQRDAOrQ@CZ;^!xEvYvZYxW+_^ZCEu$rW^`NX`pFevdgL9!_4ZO* zn93T7<_1=L{ zICLA`hjMI9wVP>BRv#hiVwF6%sBN37i0G%85^u9|%V9H>jafD&xqp1ne8oP7ar59% zXy~hu4y!ZUJssb7nG6UI!sUh=ZKuN07Bir+^e2AH`Qv9t8@i57Kd8m;vDx-=;;7YW zUd-n>@*1uz;pD5wH?A}^?Pt!ij2X4ubd+6zHj^l0m3(tz>|5IR;~)1pv)$x+{M|E) zlhR~FEoTfGh?%_J@}=Lk&#qlqLEwb!o0FPk@24_(ul=gFHXAcVxb0)Dn6N17Xnqif zYpL6^b#VIlspL(|XSzi5^i`XJQaQ^WaRm;1Mb$B??9}>py7Quv{~okSgQv7kQQW(j z+Ertn50`Dz;mm!TEN3@AC*3{&=4;8DG9{>K#MHDp`9hwQtmec^oBf|>cZ;y}zT^ui z*Z&eBzij>_p|0ZLF6U>rKF8jvw+eNZzE-JlK{eDc?(7lSO=qYHdt(Ta=jy2T0Xo9z z<aSCs4<#YhWv9)7QeufMC^Yk1^ih!V{EY{_?n(JAKol}?o1rE48d(&pV&ofa>` zCN~N3dVCor6-`^7=jT=CVcsP3$m`|N+Pk_(SYB5r3J$I%ct37@#`*GnHO(ieZv4R3 
z#+zPO@I(Go<*j?OxK-@zXAE+uZ^nDbAE*ysh*1G<~${5qa%LA%ukM0Yq4bk(oBq1DMvF6Uu|p2PT=mDkQ!?2URqZIwA~ z{kAdteN-m3Y1TY>r^4FS@v9H(`^-+}nz}Md$658h@0Wb%$7arAS(ZOMMv@c0@ZiOE z7V9rrH!n`KzU%yMp78j7MP&%<1D{js=)rSO=lXke^R7F8PPnJ}X*fW{g*q^9dh~Rp zz6!Ma;k!8b;F1uglH3KQ5vRCqQhP<6W^e2*+f>)`rXiOYRcJ4GBUJbVGdw=>r1P2c z;f)3Tk66ygb3FWTGX3RT(wUayI<;Mga_VNH!@u)A)2)_BPds#Wa&rF7V=BG%Nu5NbX@z@a1=VCn~MuW%;kX>$J$W_25qNd89YH3tSn<7BB6_b=xM#hTVAS`C-g->L%ge^@kPBWV zXYLcv8HR1^m(Oz@VT!QPGTU8zfYxraOmWXni`Izya`};882aiC4c982fdQkgt+!~$ z4J-mxr0R7BW((ugm1G&u#o@#!8dP>9!zdLOS*9ZxUQGzZS86zAC$o6+^b8&Du5y21 z5yeT>F8|CvYl{0Zas5ZHo|7-KETa2ePWJ<~cL@>^gI!EcC!Sr6 zmB(=L=rhfqP_>q+7dXT2kh#ac^nBr8Wccs}F_6UKgx%q0_cp`gV@jX$sbzM_Gdo3N z!%MUtc`1K%m$&v7d%!8NdaGX$A0wUoanGBA{o3f!p7Jq|fE3g33$a5!EoI*zj>zru z+Yc3JT}%~YxT1BwnkhU%ILV;mg+6*jJviO2Z$)NIP_(_HrGRjq<8Y%8`?sbX6wc|f zF_ku+>h7sNv0B!0R=s}En%Q(U{pII4xs+#o$hBg^W5dNMl7mH@a!!E?PHGt1lrojZgNiG$Chm z!biW@myPI(n&%x1I+Xv095-)xa_p8hImp&2YWw9GX{Nvtx$S+9hex-}es99tGF6vU z=6Mdqn^))fS&r?jV^En>{yu)eFlyl(hM}%tLh1ot>fJ1J6<1;ABMFjshi_pALUd=a*J)Y8h*IV7`8p#r_9iOT0Yu0 znbC7-;-w?F)Y~uslbOKWIrcPFcJKV;xt3~tjg!cvh4<~U?ZHbAbH&{^2{P7kqmG!% z$;%`1(p|f=v%iLq^98mx@zZKkjJZ(b!B;5=Z@H_S;wJ{z>U~1GOJf#wc9dqi-+j&R zV4asKC46Y)wajxn=^l3bv;DoM6^A4D^;m_3+~mp^Y7Gsz)cim&9WVAACf6a+bgF`R z;ND{c)9Iw~J*B=8w?ci5 zIm+cRqpe5&*{%8&EskHj_j>hx)wGO~o%gmmGar;!W}3j0Z`+kSneq_XT8&QMZuozRiw$pBRjUH5S>p zR@xGGBQ5%(g`fA7)+ikxT|J&>>&G2n)*chEq*f36xGN7booGC&a!JzEM)%FV#(+nH zgziTpX0b7{;X1Qig$8e&C3iAx4TzdA2-;2NS5?H8KN%lYx100n{$j?8xjOQ_zm3x0 zR9x3vUCaY5r9{d4D%pz5+{Xu5q!pF;+y98=Rz(&J)|4vTn0S`vDD0FY=v{l~nn^)- z%e3P7F3&-037IIk_wENRWJIgmonz3=XWMFp5VQR@oCx8L?$*|T;m`sHQUo$@S1#eKw z{Z(o1dhWoeKtr^#@}ZYH&+>=tPAhtgx#X(rs|dW<-TGYMQMv_lE+P54rfy|E&viE+ zFX5byV*hKLQ>*7HwpLVF8ESY3gxmH#E!*_q40}7~!MTQdftevAambfVD)F=T3rXc~ zo2X2Co43wIGd*LVJuX#xMR;=IaK}PRaRdNIx2VR(_7!uq6BiZ6)&I`Q2j?#$wuYFN<${zSG4Y$RNLr zIbkzqsB>o*!Nx zo)FS&R$Qno#?{qUX1n>&N}l>kI8`&rm$5t2cefny?`DoLbEKWiFi9b^PnFHz@tg7D zi4!g}oF6Vo7R$SJu3YHt`1HYnR|-e#yNEtJ60N9N35MjE2Sp{DZhH$QnkR5c_&MmS 
zRivN>1j|;(?m%9Q%{Oa##btTYKNI4XDq`nt%$pBLWS=a&f91IFOqTX>>{X@6;i1>; z+bFsEpSp51wN-Q_l`}nA2Y49Ozdv>G*L5;^0 zR9A%^U9-US)ItOL;}7G*@<^%$YPIL1eCJlmu917j7=v+xtxnNfRzywS3&Whr+b&1j z^zaQI;4~8J=}a2xvX4VY2t=!>{}2>o?fCS0hUnrOWne$_QH)dQ71NOWTo@SyNiK0@2F~P+Q<%s%k!GDUe8?8z+|4DH5@!}D`J^3 zzsJSQ=GND>fD>hDj$Pl0KKhTY<4bQ`wLcCg&S-{&q(*F=KK<#z;N*Ackg%M(qLc1c z9+C)m^=h>7SiI6|X4IyA{_nMNzbafYYHNmawp%AL>D~+Phh|R%%|4mkeQs+t zKM*Of=;v?Eq*~57n@ekazK066do8;2+Krigv+lkpS*ul4fw-!2@oDWv)8jLgTK98l z-4*@1?^j2xx3ZA#HzxWsr=L@pC_fl*Z#(lx)uyg@OAn)EubZ@(bD&f5?4A7LX9Nyf zoblPTts(MjJ{NY*Tz~dj#bXB7avr@#zPM1gJkI{S?-^eakUv z7C!K6t~s^^9cL1k7L@zLQj;dK$X%g~LZ-J5K7%L^@20os9e%XmF_cB<>%Ot8vyw3b zF$0_$?@i9zlFuk3hu!gbh`QU`go9`hC9HMvJZTejBq*)+1Vh5gzsL|+Vy}fwtZJ7|` z9pCEy08Ph+)+AKJbSw#h?lL9s(ohPUiwR9ZJa$ojActg=nQmL%&tELXaD6L0RGP}` zBl_w{{@NS!G3R5b@5P=Ifw&Wg)%N6h6J7MW;rQxJpLS`;$N}s3 zN^eKcl|5Z{EVF<-Zmej<4z4x}u;PaX)4oFsUy(;5XKM1Qb25)X=ioEEcq0W4RsQB? zvsx(d-IL6$s!M0)-&d}rpD}lvZjEPo;&v=TFVdT=^&_97yP~7AoPA`M5?K-r{>;97 z-rO-`q!j;p^m=Nx=ptl(YN-BExQbT8EAPmvlPB)8dM=rcpX&akzjbRvXx-h|z!6Qs zxc#gZs?~A_kN7{6Sg1{5_{7dKdgX$pVvbEqs;g8wme*=PY58p1&N<)necDYvXLOqE z1h1#&4XZD5sE4dceu^1Ze)s|} zKGB$>&iW&^^13joxITvr3(B z-9quNyxCW6ez3F$dw4JG&g@c%X#gf||BrJD4QEeo4%<^|{CwQihigQ~;DgdN7X9rc z^Si8JnZ0hxJqLKZ@7K*KmJM8fYnzVKkhRU8 zKn1ihFk}QtNVGR$C3uDp#25MO6q4{@Uv89yD}PO(DP)EyY;ok{fi2VA`Tsl2S278XwUK0 z+U4)NZ+-5<69>bCT#Z5J`b@3$MkTm4-O3!jdqY6w>bJ%) znRu;^6IafKojOXZi`D)jythB-(A}IU3zst1`m( zl)Z;Zp@0CVD1jy*em`QD2ytkdW8vZeM z>c%+x=}K=7lYYLP1>agk$*~RDp_OQ++%vle74BV2)h~_?Hz}WZecy9dqhNc7_=Q6i znw1vG`J^D?QR}*@$otJwfr8DVE%RrV_npmtO8&GOZD7;Dc4Th&@TIIzzQwiYnrO$Y z-C8t^uA|$6+UBiF3ej` z-QCBlT>kPXLqpihWW&dA!qQ-`U%#0D66W`)d-r}uQuN#Kz6V#{l$5_+eG#En^tQvk z=i!5bSAH^AdGPH^TN1LFmkVDh^g*K%l&*%jgyD1ugxIqp4|dkchPd!H2=-T zyPJ>8e;hoFRNH%)wMS;D?yJ2^RsOB>ZyH1dTTZt`$LYNvF5x>7_9z&-^vb44BUwnD ze~p!K@zE2Qnp?LK>-SkD1YErdy0l%Kw?>}VrzN6MVyZu%qa^yGr`~h5xgRAvMM}aC zKs%3iw@rO?{2~UMKBhjgeEhsYjPz!2GWDdpLA^EdYZtm 
z=s}1U5ir06oEX4XnSC!+c&t~uGq%gy=B8bmJxf^NoyVIVik33+ML$-z6GiMsA)3vjR+e6-)(A-g#XmuKkgRxZ-*xu0Dd-@qOMt{T`MpZT?)6S8g~D zKY_(P+ZpRsWl~r8`FZQpnSiNKbJg?ZGRNIs3R*ApGzkxF1&Mg zx>s{UglFm$A!pNs4nvs!4IaxC%TTUu8nuEqqV)43gJxcor&`@uWt|wyPiVj16H;9h zq)_F;Wb1W}eTZ?70^8JlXYqW#;v50Sc~%W_oLR1C+I4B+>=gF1dmS=+!6IgGPhf(b z1k+m#ODFlTgy>wL(w1(6^U41mJV&jI)QXKVwZ_(-`Bqndr~yX>r^sq-9T# z-SO@N(=jK?IkyxRp?BdV6}G>vX1z2K@^WQdJfGuQ$|CHBpv}TqZ-tIkZu;ofnQuM< ze4Xi?)0H{fCdxL)sb9XM_Gmu4(ZuGyjiV#qnam?IJ?JxCC$ES%-oE#PcJgbsYEO{R z{NzCigYCzHU)Mi9T)Mv?7Q3C3ZEfhSS$Bs?v@7jmMC-%s<^}I3>}Lluc)ooQnSOfl z$W)p@(3@GgiVu(P@duUX9yk)XaF=jKd}2iFwt(8@H@4FYT8ZIXZx!QjX?kLsX=L{x za^nC4$#J5qivO)_?DazaWbuOJ@Stq2#&3>wbvs-5PTG$g68iBd`_-tSm)tRveZ4-f z7Tb5F2aVAP7Za>=J&pS7g;mDnTa>08iPs8uzuFEH|k|?*9QTK+?a@G0n!A4m+0fejk$v)#VdrqAN0ap}UQG0_8Sl z)IP2YfIT`&&gfpLm?^wr)(I$!Y82BX)%_?ql%LihIfM9OkB2fuX7sHwz7U6vbzJ)b z?b7~{&(Ne3xFu=e9rbB2hBv+cYwmnc;4oq_;Vq9XTHvNX6@acDg8qkx@aPQD?%JNI z>rck?zV!IYX9z$bCo>zSd%SgpIq~4q3Ypp7cuDlQ8oH}u3>$%$!67JbR3rL9y;F=a zoHCu|0wev8D01ZI$|z6_+WC(7w+Wz%rlAJ#U&bxmVu-isGjOxa;PBC;fC$Xy+)N|! ztNBV6ujjSEQ8@A6H^=b)BBI@Z2%`XIm}WO(mqPMY71xHW&>c0>>P}4iYE1?*&0TfR z8uRTY9#Kpl5K@gMF9ZanvbZ9M1Y(a(Z!51mPm>ih&g>*PrO z%XPP+T-DpxV;Es-QNI(gK^#m-=a)aFw>tKsj+>Lf zdxWK&0$kdt0!UwPB=UE*dtG}k%=Y~HrmIFeZTq&kxIXp-PBKps(dX+seA_Y*^g$bb z#>dD_00o3acz`HlgNjt#2Gz@yIuE8zqE?VV+f~@bY;6EJe5JfmTa4{DwZ4fV4t(Hr>v5aT3ble8yz3wA*HZ}rW1W8f8jC5et_zc^dGP5upsf&L zO6pTaSO2_%_DX^VM8erx5NgB>Za3((Wpa9;P9C0w;U&Xirl5A&JY_xTUUzcLR`x?= zi)jZ@U!^=8VkwNTtq7)73LSrwB}_6!I^34mh6+#{-d^kIT=b_kGO}^7xi9i(s(#lK z##orMChX4$C?@8u;nOFu=z^WyQeHNhiLC22Qn7C9WA@%pVw-o&8}g9HN`S`!7;LpJ zPT^!>^t{UKRYT(1zx)M!XX44;bv-a2x#)~_lGT!iNPbZ%BeaSKbRHAa8hVSAyot~j zb(P;*{)9JemLzV%fly$A((paS4rzpseKw#G3(Q765(2Twec**b$)t@ZRY4Q)1eJ97 zlDYza#F=+N@u~Jduoh7iB-^rGN{Hh}Eyy?>J7NOMLg9ElBNjALnb1N9##X7 zJ!Q!y0GULan0o)%4de5?V2`)^T2FP`6r$HN$W4|)`}H3P4aU!>jBCb`CJ{uo{u%&s z2E5R#1ZkMemr;^D6KS1-Vtz?oNVNa3N7NOug=KSP4h|7QqquJ3?P=A8)Xod=yy88! 
z2qePrh3pFB&6JXw=0SkHG^3=v5YU3Zj$~{(Nc0us)bl7AV6H)ipVcx3@WaEwBm-Ls zwHfVSuHHtf>qiR5vJh6jNqGB3q(t;N&*Pw79~&y9%EA@jLXdSir+Ec8)aKLrO8is8 zy78F|X4CzK*ccxoQ$kc_%ea@4E=30$CdU^vFqPz*9IapJ){HhB|L7%$J*KyVjal;0 z-Qh^ix6uUWru3DVMgyTzZMv_4=SNhld)N6`Qvsm-fIsM%dDRlD2e82}7u$VdEEg-f ziP&@T)pGm_ai6BR26zaA&qUgcQlLFNd2>r~H^AY+R()j5BouFKQCYj9jI|h6-Zlf6 z`)sEg(Tnm>nEe-yD75BsI=uWjstq)gzzF`dTANzCk**Aj8!(Jd1EQ+QO6H~gP z8zqNa4gJ?=hJ6tz5HhQjQ~7NBgB~HBX1ImG5AV<8p^KAoT*1Nn)H|@?k0Bo)a3xpWsbzi9vDpg24i4Q{M;Z9>>rsv;;QCamrX5Q`Ytu5Cunl5pGP=V9=IfAGvhOZ`I177%mEN7`j!4VhAKVo0 zHAeI;mQA$L{oK45`wAX^_s-Cr|L8?JTTACIh205N<6WjT zO=}S78lLT|e4rlbv$rBTVAwFe@B;CkJ_Fx)0^dGkc#3u9q_Xz~8R=K@d`yl@EZrCv z7Tu&fmKi;W6DYwl|19X%>0e3DsviO?k47Xf%oI|>bB&=LWQv^skGT3JqjhkH;G@0Q zb4Y^aiXUoRwM|@7jE-3_5cxTu#* zk3fyNV?6R=beMux`7JN}!2U`~TcjwxiiD=ouk|B_8HmRG_P@CZJH@s~GHz#4pbU3Y z&E=Xg7-(JXtQR5-Tb8j`X{}65NQved5r||XNj;!fO{#FL|5J*lo!mF%*UV}dl*-EyBXHDgK{9=n_0hUMn$pw5Xy|!0*WUT+`UE;?d z*$3myZm6#-viCd5J5YeNk0QGvamD^aQPXhji)RZalfmJ0EK zbigSRIS?4<=WxxeUMSsKz=W+$X_Z_7D`&2q1X03`kUqwW26T3UaW6B1lr{F2z&ykd zvuN=v+TdD)R#AHb5oO_+n#{58xyJg!v&(1`T_;^)j}NP z@cdWAlr}_tEq*N@SAzLbT)8|NG|ZNJ!ElK5I>(9d$9(O~=cfksYZA=SHH#uu`S#WE zy{3V+DpWufw{urh+P^)$Z~9dY=k&%L-lhV;eI8Amq^du#m?U8&G;YfKW>LzHQ%G9_rWrWRHvN!EW2s++&5p&9W3wp`aV$x%gqepz?~{FS5iE@93X+K3u=Pql-uO%{B`Z?-@_CJ@UeY8Tj^L+D<7~z;$T`3C{1SuKW6Ox9wn5vNSLqQ3yHh5V*iW(D zd$xLT@gD*mxrN8c&Ozbu4kQq9SV=|UE2mn>$eYOXX^I>K0xxewq9 zZrW>C8+gz#7GaNR#j7>oz+OsD%oM?zb0e~}=gxJ5k`OEM^jR{)Mr(CVm}H2N!%0hG zC&qicQzAD92x#Q7YdEt=TR1A#G+^SWUU9yhtDKF7q^L=urU=5Pq&8WZtu6i+dV+HJ zguVyn<9mURL3*iY4#MT&r(Hvo)%0?p)S7^IJT;@@c1Bi0WXKws6gF-iRhsjG~vmP{zRfB5@HS(dOB0ZAMpY6wRJ((Xej8BwA=9M zFIBsO{ZpM~F1HZj{eefrJoi(oGPZ$#RmqNP6~F#E{;3wA<{xEhA8IGf^z)@MX-(JD z{3k zicX>OI;g7nBoX(M%*8M{Te8gR{0p?LguG8>?yQyCyIW)#D_iQoVaJz~f75xp@PXom zxH5ON_6UaU_V72`M_`fqS(^baxexCj zTBT~lPB(Jcx}+^jEsH40i+6P&*{KK+DuDs_MJxRLpwD%>n_nB2MoK>tjzY)uL1?!c zXe}kKn~x|EcYK=9L^?G=(7sAUFDcEU7nShBJJRj{9jTj!wE}w_aTKAphh87RAyqej 
z=5O0TGJhOqEalw!hOE?o!s^(7W2t8wzD}9$5{Ye%S|=#o4$xj{BW`y%t-?;qHzb(zC_^0hub^uW$E`xg#fl^@=d zGWBPsQlPrcsEkE>@nT{d;zi54-lpQJXVObOex?EMwp?odkk@Lh5bR_MPveT}UdL0t zmLJ3+`jbX9Xu#{@Y&4$!2%u3FD+GdGtes9Ng*(QA?L-=3rdjc?L|-2pqYnDVC~@1r zI)Z2g%nv=tY|I*Rk{6e)aE-J{xs_)-sNJaL^?oD3+;BZ)R3GoVcm!cdgyfn2@SAtE z#Rf=Xg5OnuLrM{xTaweyT4Adw`0PG?;~IQQ9PY^gIHZEBRJfxU`{DS5Ht0mBF1HkW9b`s6f&R z&34?b{4rWh6GJ&8b)!iZizjH@1iz1U?Ai8AEmx8JTE<<5gyguPvdDs7s05g!(gSBt zeGk^8RD}O9g}i-09uv*)p)Q0dN|?=Dk?dl7QT-qxlr6|4FS1>wofo}?oq22cXuuXj z3ctbFN`CFRP0}YVX2#nNxc#a`-lYJp*>a)SO|PS6AzD#Yre7E;kBQ5_gotsnPrHx7 ztO{AQqr>5b1s&1=1|hT%;a%o5mrOZ1W6y}n^<_L?lvA5sLAQT#phU3~+GtQa5pq4gwuT%eO_qe%FIfF* zU2SrGpOC^#=o3fkTLv7b@f^~HyqGi4b zM_Tha7Z7sqb8u>&#~1R#K9DA5S)H0?l4UKGq^0Ga|VXO0j8^zi-I3 zKiwQ&%p#uiIsLyV>Az;M`TjNetY;>Pm=sN_Skez#bUm!Bx+Y@3h)=T&7Sy?Km>IQ= zsOb+^_!$NHL&h=Vl-G*Mo+6OgvJ$885*f%lfMVl0xiVq5+9i!sF0X*fznMt>=<*Z~ zQt;;`sEqlzISCO_!-{n@R4FfoeX{V&88 zt7WtUJh)F7;HXZzV?kCqj)>FS26Y%U-3-BoCjWj$3npSF9uQB#m+N>XESr{Vw4RYa zdy?HT`zu~(t$*cNUauQyB}xJw!1-*E`^k|35+jQ3Q0TtuE`1(lZIT`#y3Q;ndLL3H zsqO1?XB0tgjbW%;DvOo-!@$!ZL33ub6BUlBoW%}^k|pfh45#q3w|3G^SvsNC#do;u z&_@h?Xv*VoqdOa-m#k1~X`Ar=YyDY>OzN(H6%M%kVZR9`5EJ-fqjNxRohc4bGdrLk zvRd*W-TgWw5)`#PAG}Z9L=Nbr&R-m(V*}F=XxsJgB6HN)`1D6ZXh3wraVw1_?2R5J zyHoVNHxY5sK{9{yG9~&(XQjo8-Y}@hoh4m^`%mV}vnH=jH2wRTmP*bZ#Q8*sxgw3% ze?BA949v1Wp=@mh<&B&NsC-8IH7cuQFU@4B#;|c6X}sJ#8*iKF7Y;FU0}R{`Y%iE? 
zW|^3}Z_0QNG$k#sl9*KWNG0Ic7y$KRU7cSE-JC*a%{k-6F4PzQiG^;YROcl<82J1q}i}Ej6b;EWO&iA8w{-iJriuiH>i1uDv=18 zR`vtHcKQx4yofm@s}96Y|7pB{{?KRkc?v=oo6~15CH#q+U_i&h=NoYWr!g-ti0361`H+>U!L=U zd`^-W37lwJifro2Li)uRC%%^bInm~6E$ z(^0q5weH6w&>YZzdDysc-%M>T&l+DlUiK3O_R?TGVhzOUYWk8jn)YW^`rd1_Sk zJhy9-#h3DDRjC(k?i{1jG?f+JJH|JEN(&`9`fKf5OSb5vpJ?QzCCHS%iZ~^0cJtoN z5b3rN`=(lwn(nET7~ZajyiAux)bGlg1o=xiK0u_)_`{_r9#1dA;BfUHf9&Q3?r#%O z?|YI`wz7=x?{mgPOn$s^F6Ew}05Hh&CV0_=;8)9lv|XcwdxFVjZU*?u-Bc zo=$H>Y+j)!^4V5v3Sox^S@kc}X&PX}|0vWX)76s4UV)nT|u*ZW3@d zC{>}**+gh8ElnfFM2?-Q4s9eH(eV`PxUye{!gvQn-*Ax{u(I){kA$+WsdTHaT_uol z&~3=|cT*s+j%Ii!{nJ?9r|`Cil7` z!f*wrnK3}p22chP&)u;dA8hyQ@#%!)mO6=%W;}0H=nv)x!ML%R9o$jTNDbo}1R)Ew z7-zmFbY%SX35*COCuYOS)NuHS)VfYLA51Hh_^kqqk_}vJHgOvuyL<=nOe+qSet!nU zd-CTf1GjhH7v~_B$KGX|psjZiVXzQ(uu-gN?i*Wg)y*S=#YX?)i z4gB@Mn}sa5eSSk07hbv^`A+&EvQebUkpgq1)D$q82!Mt=EO;7`NllxS7cwe!K#)I* z2tW!wN6IMb^DD$Sum06?kPR8gSG{-UWi45N6Ab5YJiHng(JG8SBK=Tq*@OWrs;Oo; z-e$?#ZSs~!pZZ^tF|gD=)EE#tKZEwl=o&k`s}$8E0zNA={#x<3hEPN^SYRj4T2j@o z?OAdJuW2{(vnb{o5l(%}ZX8<{qu5JmM2^RZ5^^-iPk6qCHQ?tFA}f}?4j%^ts2^=&Z&oDBgW&lEbTwYgsN*bcmNKYIY7cKql|TQ_iU}#*Z5|3XKY|>Sa8xy{j6Xo#y1?AG-WSz#iU=8 z*ELzN&5xjzBb=i`H3kHLxl7Cb|0;?DqAXrTcRia$YNH(lZnRJ80~ud;jQT-6G$|^S zz;QnN*Va%(r5utJmkyXvM0HB6?QS1BJn#nX$V5|XZ zoaAdov;N`NI$-68-t@PyDGP$w zE+Yt%AEC98)-*0M*uthr^R~seewEQ=zck1~g0VQvRoa>#2leQVU*LLhS@hea~ z2DGWiK%J@KMnGj2Vij#AFCij9z>g#JZ2sPR_=BGs=z2gzNiNE{cpJtv- z)X|?Sk*<$-7V$2ghzqvEx=II$J!B4_^Xu@;w}`OrWm$< zOT=n42!%L*Xd%B-gzS$Ca31}(%33Xa0Z0j;)NYd~T%znpdrvgMHn!(EG1)w8{&*=e z%NHw2nH^6r8w`?`c?gb7PO1D4FC0PnNzl9sXiAk;jT=YNJZGZ+kt*PG6_(Kz z!MTXDj+?SL41`nlp7K985D@9LUn=6iJF^w1jyUG0%uz98ka)|f6@ylb7yoNgsei6G z!Tn#fL_VzUaa1!qnW_KWkKy`Q22SqUDEusrF^u9)v=4v3ZhIZ1JW}N|bc1?nW4@#{!mj{G#_wYzT%?PrJ@4$VhYXLO861Hq@q13-U5zNbPT-C9RmDBpj zHrzZ37JI$Erl^SEwI5>dZ3rNESg#&l%Ep9zOUd?7sDIa6f^jz7cbrCG zVf1yQf>Ubv1L)tQ7u|K|d(sil3Ls9ONIPW@t`fNXkv~>r>6xH_Eu8r(R=&1pVe(3U zEvgB>hr07nk=b#-lOE*pCX^^)e<(lEGT1Y}3%|vO>Gl&RhNq?U*@bN;Tah2R2fmWP 
z4^=~r^Q*NsJ-Tkk51jsnx_9rOZe>Q{(Ny!(cN1hOrJNnQrKW%UTOu1*L-}hUo21K1 z%F`oUzZh0^e5fDA!98bLBEOKlas4k*J#x(3WeN8gX%DR*(y;-J@3UN#q&40Kjc?>8 zN|_|~sa-7!8v*ciei60Zc2s{%uFg~)c9{O^TK*LFs=IY8B+hb($BZbX=cY`L z_fg5L40UxKzmbD5g7#m^pD9iAr2WNb>3^tR*{?W7;?QE24lFpR-VL9P*`wc*5SJl9g4pQ!J*mw%PXKI=O#1FY0_8Y?5$n;C znuMNpi)W#bBAw^%#t;9X*wRWvMq*1Nm45gF4hlFZ;xBtIQ?v-WNoKxNU8-KB;pmeu zZEijqT@1yf%xStQS`~Q#8azwR6#;+Nnqr@tFt&GbK}dZO$qq^JEn zU&y?9fy#j?_mZ*dMdd^03k&T^9Hs({8k5Ic~V4#3mvs zyCN+IYRizCu;4ezDZ!7os4z0`@vJkNQ{g5P(V=I$NB7(_GWWFrVHYWCx9!`8u~hN4|G#Wk)c}N03Wt#t%_n! z)i#KCgB)?>29j~%IEo|2PXFgFBxr*qs##IX1{tg}l!?zYfZ>u!p~khfG7u)7)AQ_= z+BKgh_6>fMR&0cyss|@-R!6$G**WREN~V6l7q}c7gdZqel4X>p737yQCbj+E%%|HBrr8!0kie zD-dG8Uu1Su&DCf7%}n%fV=Xs>%4MfD^+hAFPC1-@Lj9Srz&zK{LJgKNr}(xf={Y{>iBq6bxyJk_ouD1OB}ad6D7-?1@u)gNyE zL+6FWgxN_JO?OA?sP86$wbKBr+DO56eREQP&#b&0PK0iarKGRP!hdYQ)-tmj9c_;j?iYQ?N9QC^GN{4TWKmN(?X%XC-H=nPbu!<69pdiK-zsmQ74V1rh8I6k2kEeF11PD8>BwHzdzS!*a#j(9g7sn4SG1KtU z(iZF_o%Y|*$%CoK#inH#ae;FZ1JkH1{rY@i1R6^cbnlQkQ@Q!_e>E|?4NwbYQTJCr z%e&;>b_6C6wpmzy_LNmjvXhHaSFQO}g?+JMN`wsQ_%EORJrXAw`JO1Fjw{EPvnC9o z%JsJP#KB{sYFlb-LKlN=p&Su+@j>r1Ii<*>#vsHq54K*?_4(g)f&@tEmksg|N2x=J zQhe{MJ6UD#dBGc_x;XLI2v>VOna}zD;%1&ABx&emXm5Gnu@ky~%ek7V0V+$8^s;|* zDf#S7?gf+6Rk`x3g_Mvx-w=%dTN}e}Al>1rJg&n;?AY?XO{ZpqtoBo=3yJ69l=4%B zvUN3}ql}rfN#@ee9*miIGz9&SvY#7HeNQMDPAZ|XoJRugh2F^yK_|z0Auh{qTx=D_ zv5L5v2yn;L*F?Lf#?AlunA9x}gjlNW+F^>rQU$kV+9&>q$3N<)8y9?MZcN(DF&Q-NS!!;k!XhY&liw?`+FTD-R&zN^}xjT`dE_V zFTuyXxLg^j=}}0mOp(MrX|9a0{=C=P`CedbmlWF7v{7n?NLL*y3{Ksy%J`_iQF9S% zS`?2GJrMo4xd%_!mFLEW)XpZxMQe}1s%N4!J zvmH5G@LV4f^wE6A)sS*U%;xQ1jpsXQ{79I{N5j)OF*t4Wgay~M(usb%`*W@U~pk6LTD_LzlP&vAs~QR8kr+}0#;>a7Rq&89|NfX zO~lt7kJycML0?xek%g?V%$;YBfubL){YDOpVBy7}x^8kNQ(Ds+him7IaDloca7%~z zC>ic~JSWj^!qHb(>kVcliq;OV@-R5IsQmZB8&OKZoRHwTH6qW4e1IXPQMEM8POzCN2p)hObkRc)j3Dq3&p{$P>GI-)qfQ9zqnc%qw5F8(+XW zJ#oC8!H;CFl|O>dzA~wc*}*|OXM6IX|8VO0aiikW&txD$;R1}TPfD9y;&Ofz1I9+c| zNO%DP%$QKG!%*bRerp0yjot$w7pq44Hq9`p;yx?MDqH&agUWZx_%sxiyD8#AyXGdP 
zAdN1_fOM~!C9mH$gQ~Z-^n=-_C&=nujd_?E&;_z)VxEvtC+yy&2-UqH*8#aPY%#$M zQloqU(L|+vY6y8WO@;37b2;8`qjNcECM|&&YAn}}1h?BHMm`s?&WRFW-0KDX8F&n= zbnNT$K_bovGtK+k#6S!K1=;o@%>Doyj0X2LtY~`Ojk_VWH>F0H;`BH9abNaD3`)ao z%$)5`>x~NRq0xBa_|2O#={3tF2G}95+Q^71SIxVnZbP2vkzh;vQY6<{?Z$e+ zm=Kw+SNFcvb))9~n)pQ3LDkVc`$S8G?hN0HE4x@twLaQX@nT0`*miH?uMz2~UrKC@ z>)Q9~z>LUCH+o9e`)tHsJDXy6#?4PtX9U*gOj5q(>~Yq4tXMr{w_7pY#SHPS zbSb4H+3gTJjr(_h-+n8pTj_DWnm+WfzU`4fki?sKHyD13wCEgK`&A%)6h=<9op&>m zK4d#PINu9W4o&6T4H1;JZN6l116g!c+mY{4~6q!|%Ghg2-qebUsp z)W){VL09_m0DRjI(hgsX_#yqV75mBj0ep_`+FQrkl|Pa9cI3oo))bT6)L-k({eY3V1cI?8Sf|u7yf^JGCLn3JpOMH zK&339EY%nVJ);W&Ci>CwSO0tifOv3`%w_zm2e3<<*7{+({S|&7b(OxGi(<3|AL&K_ zthUnUq;Wn)R@?2bz&3%8-jQTp zrYS)$4Mfz~{0SlR|*5BuWfXpVwoeiC20}*t@g1(0)`ZUU0Lhc#AXOa#b8r8SHZf0;m+EcK^b8 z^XaHE{G22LtB>In{uCtsLh#fH_{XHDmDmbZPqX&;-IqYXY-N*LWMlX8l&ukMsV~fP zTVDZss$6iWq20Bfi7iRpcxQ>5SHJ@)tf4Wa`!VUM4P0ZKiR?)5`R!ndn00PtJ)P7+&*)R`@S+Y!C`o8mtukG3(4bu1)WO zoX6%RVAdO2gEd9P`8T!@IEHpVA(oV)bcCO5#5LwOfMUs!zQE?OFPQF|yVgExi+`?i zp1EN{V~|j=Hokd1`%^Q|A3}@B@QT-Mm8*LGG`F2z9loe%vDkPoQ8_zeT(TWeVlv0X z^u+9q@R#folvdNurj6}5UwZ$NRrhPczi2}TYsjZha~r+C zzxa+E*{bigu9byG0UxN8qoB#R_ZSwfZT)j@VTwe@>y_P-A>hEEP z1j*@3h1ZUAIQc412B6WW!QqfIP_wj~b_39))rdYe&Lr#H5Thn1@W$tf1pahG&lv9T z{Oy=ZanJ8pYS^^nzL$r{d&kgf$-LJ7X{Z|cF$o(xgS>ONGfX+ikM?M@#>p!>$TW{| zQN5<56guR(eBz=T<0tY@J$sxC)32pAg$<%Vedi#d{gxq(&+o2R{mGI0#krkAZ4N>} z;&==iZ=I)ay8*e-V63pv8O<51pyRp+wK+HfxF(@+(0WQ`y^OP8Q*2AVx0BCK`-f6q zU6i|pE0aXRD{WxTOpG1wChBn+B$f7!(|w8OE$b0Vp0IFV|0wRO9Q^bYx#lyfHy-mI z(wur|i}VVY+9;{Bj);1XIQ4d~4?h6O)o61?4H4cT9Z`V^r`&ebUp@(U zn!8Ycf+?>n!k8cE^5zlnDzXi{=J`vjeXcg@n;@e5Fn7}X{-D|dBda%LCMzy?WZhm# z3BZnnLM0H)ap&3YGZ-S|`D|V5koeXis26 zvUaT~CL*_qrwoRtv~(e1AC4Uq@iMOZV{jh^$Sv2v2Z5wXl?@Dbj(@Xd9GnDdP=*h! 
zysbjVZ1)`ly@YVa^ts_(sCBbCf@SVjwJn<_!OT=zv}77K^u-NY*W-TGVmPK>1_9|e z7yq&*`*`YSx9o&^S4v4}s3XL6y^-MW?9qZG2Tf=z6bW`a(oDhW%`Mk`M{y;8P4^&M ztubYB1|y#+P}i=u%?qjk#yQx;y7YYi%xXU%-Wy6c#0gDD7~e;+{~N!H%kc}vMAd>T zfM=4rx?dTPFtafevoN%$-UgixXxqaX^HU@p670M6%G1jRK5f{hzmd?L$5G@hKIIZS z5v8V7{why7-oGPKHCbH9a~wF+WOk8rU!6|GJcT6e=T=^;h}8* zGKn7iNi$stL^w7nsVJBc+kc*|Yq&n!1somf-uv@t6orfT!2)=M78{ zvwJ*6vmbjOBu*73(WP$$Hfb$nJJ|`3z5wkK7Z#abC|}t+xkrK|JCIbZU<-=mp)}4= ziQh2<^$xe8e`7|}U~WIO;AkjyClAg2H! zQq}#qr8a8{&wd&d1sG5t5*`VQC?TBE`I<4_F0AaXdI-ZH)cbZ+&fB&Mcw?_=_bdg) ztDk2x1v^)(1BxXx1@eL0tWj&?!-xKc_JB(@`79`$Y7#ew_do=^gzf}FG-i(ju$&T4-&XG zRWu{>oO9)RD6qsdwC^nZ< zwKN%Z-q5|;g%P<+W=L(RV1Kb1PJ$}BYeWs}d{-Fdx6f(umk$<1j?$N&_^=6Ca6k1L z+T|rp{|3u$$Yud7hrtVl=cJ!$thWexXHuQ;k2qZV+{(Rfkz7DL_(tsKfRUlhtd;75 zzi_EAE*=9y1qk8(yR-YIMl2XmvgMzYzVli{E<;Mv^)hiiSxZxXLUI)iY_qHIh^0KH z&5dF9^6g(Y`40*u1j&wbpen3%BTd=QF)^>%qx&(h)34#>){uItvxtVGQnbT(<*GnRPxD6QI30X ztdfFpWHN5AP2LIJ24ZSrfr8K&Y!5&2icrUANfN;yF7+_)Nrz;4Gt*iyYlY;^1d<%| z2BkR;x;U^-D;M_gDk#T`DX4Sc%g?0BGs6&r6Z`0Gi@k#vDt~YnV`?O05h+M-jGmtI z{Pj+v(2^yXN`1=+xT25sj7xjYq;VeJmIzk7!P zM=_zvuHU6a>Aj=icp=70z=kLMqu<$(ByY?ja?)TfufkIB3MQ(qmbWUJV>!pP%LB7I z8h}&YmR6|SEKQ($KltbTBygy*(omh*=6f$mW(}6CtjPMrFDbQM-b`ATe`4>H$hu@> z47i%w4s{dG2iV3^JTtkOV1qQVTdzm)zHeDR%#WPH+g>vM3c9?cS68eVgDoQ|FwPqp z{1&1+6?pUl#G%Jj%}AkR6FMb3t8yyi<02SvLv_y;zFhE@H(s!M@Snn^R*g=8?W=x3 z*s?yQsL8sbn-PEhLVV7-lZRy!BpINHpC{l*h-0W$>W)Ey-&?0TksO}acp8`{&$!$u zuK(yU&bPSml1V}oLWTRPF3q^+F=xv{>>0 zZKT;DaRN7@X~ge~hWQT89G+>0;_00(53&iK-Te7{`41tCGEXfD*@TC9aupTC&m)sf zadn0C7*9?2+y__`MZ1HEjw@XmsJ+Onxm7sS`s6CzEx9P4=X?Avxe(;!;~d$#@sJ>)7#h z8jeGodOHZUc~^NzN3+&Y3|q>r7DVpn_8>^nH?uIr2<*ev9`J00wHfCFYOLGy2}E(A z`1@{V_3UJd1zZ}^j&*yapeIZiUl7J`Rq%C^y5w56(sRkli2m47w5WS&;~=~yu&wGs zAFL`cC)zcf8mROr?|}`myW;dzZet*D-93Dzs78r;olMo^=9T3_qIDJ<;58 zD%SUiri-+CnCcM8*%dw-g3$s&8fF;NHb)eGvAH!DDz~96thSbi7hJsMq5ESYRQSf{ z=Mql2DrBb?`2p@U;ekX4_M^MF{ch=4Yh9ZA?_&-S1}hRQndu4g#uA{4(sfEuwj=({ zHi{=2tJ`tr68$o%3N13?g_9qaTDkK9Mj0?7EOK@hKL+z(^qEscBr;vT4!ELM^!_bf 
z4h~GOQXZd@^lemN?l1Xuv_a*VV!$VvQZ!vtxf97$;KZy1+y~@EkB?gWc&pRxx0~xB z+YHfAbb8B@V_PJil@;p+o(lM|e_MUtBGSu#qUjUosCqmfs_<2j-I{BLYPqS4tVMX% z^Bk{o+7%bz!XEba+G3>4#rT&i9W0RHqA`Z|Sa2o{JbsU$`w94QQI)D#y+HSb4yU9C zXus*R*W5VJFx<6p0YeAa_zN)KzazL)Q1r$~ibHqr(x!rXd7`b^@^Tirt zhCXd}lW9i2G-?u_cfb;VGukPeCRO$csmz9sFV#LL(Vn5~j5skeTnp|ei;uHN_irMn zQ@|1YftFDVa?am8+p~$EVRA!-_^3G~^l;(Izrww09All~+UMxu1d?S+A^A z?WbDSHQ=3j>>}l6ck%=U=;c11ABvGRnS+N9APz3gxTZ0~U3<`1G@SOL8s#)7brHip z(izm6e(Wp;!4Jo>XuR+lWc0&@4N%rtI-TNK;~3Hg51k_O*`Hf_hyfwA+io#I{{K?X zwmXk5lDW1rvedt)cY(@nCagUhyvhw*Y8R9aY1xCf^FFRjrS>%M-lJd79%WZ%d9RzG}~KadAK`akp+!?jn?Dhv}=1%Eu& z^r z**&EWUA81>CiJ%_U3r~#gYVx_w~%>=-jgAyJI@R;9*RmxDH3M*<0fA3;w-B<6v;ZN zx&iotRtan~3)A^MbTi!9c~>G#QYqc?C6fvZPkHT6+r)zYx_;BO=mi5t*EG7CY~5lI zeO9z{96oOv9LANTmjBDHhWaaPA06{>$zbXECe-Y=

3QZoW>l#!E@C3*MvXkpG&KQ z6X=}t1l~gbo`PzU_y~xoY#trjS;Z+q6z7YCwWHRLrxt|w55Nw!FR2e}!F!rBp=z3_ z3wcAibI$?Sg{T{?_G<|WX~q#gmTPRd@u!jchq_bM%TkO{zSPw8Pj#@Q=n6KpjAt^g zbs|!(=`Lpf*McT}S&>GDFIH#_)PDG48YER+Qy!S6nVuu*2c>rOXZ~L^=;3yAj2uF_ z-FbN)VCUu9A+izS!wxHEdyY6LXLA$m@SCRnw$ZSWc38PwNWSFCu8ekx54I-2@Z4C_ zZ@CMKT>f9zZvf4N(vO#(%i4QQRVx4ZqLHfX0$uYjYEN6bqffW!#w@CO3@~3@xp?DF zs!nd=p&quOG*(L>;_i)%w3lt00urh1?|x-67Uw>5{^txQ$ANy_Q*u3jbnME$`UsfP;4yt{h)@h2iZ^`itr@`S=^=CQS368? zN$?MI)^Y_zJfczDJIg1OhHx~Vyz6|hXXBRa|E`r+%e%cQ+31eGqYy;>YUvxdSTJ?J zY3^`~LQ>cTCPYIRdzBUMh!WZbQbxG`Ozt&Gwz$P+9OuuuVl#y$O>41gnZfU-1EYI9 zn&33imq zL#g(-ek}pA)KD&u<-T{q>wtN)?CX)vGoGN&p8-n5BF53BCR5ZatN22NS#E0WtDKd`4yd=W z5~6}PjZ~aY?KqDPG{+3^kVYAOR8bRA;cZy`LQVXso$JI!&kXG{FOv%O`RuBXE%3RP zzU0Oxj$-eP=F09XA&@3Ed5B+tCq7Ee9w}VLg{sh7h<=@2PWP7bCRf%gmub-!x`Ui` zhb_QRj0*jCrxRpH0X?2v9COO;7MNehCu!WW8XKf-(E8C@9Pv|Gf9t}6+nI7uA09`#n)0PbkSQXTAW;=VjCd5+;TH_kFe zQS-lEG`~Icz9LnpSwLF700Fp8d-I1s)Z&gwOM&8>&*u=NRrB;ZwlEI{Gh-aOKtZQN zMv$i(hdsQ!c@IZLLBt_6uH5jj#c|LeJEi9nXmo$x+iq{D!7?sZ<9-$F@KA%rA&B$F ze?GfM(r;K8i#!{TQ;$Oa{?bw}or&PBB*ZD`#u+QD3rE39oo z^QDP>v&~W$2+iT65i|(4I!+wpI z#$$OCgh*A$tbK=d8Ru@*pBNEZPCd+od}TW%;@GX*HF7LcH77g*$kEFfOYuD0D2)4= zP%HBxA6S-cm~QX1)rRmWwtcO;sp#6Xz>JaYBmTxRRc$6q$iNCmypK>L zybAxxYv+-!jFUJ~KAtYpEesq@_8|fQd=|!}l}$rWL(vSnQ4Jc(5_laYI|R-U)vzcs zZDG~cBjk7o%%-7>5;RO}ro;F~Ug4r>EGhno?M%j`Q*Hg^Rt(p81aZu3lmneT4SVE3 zJ=WOkkX^jW*DkNUf3bJezMrA z@FZ0lhhw$0tw23S>se{w2^F1t9>1rV&IR#Psj|Yca~xyrlth-1F!Fj!_|+6HMXlb7 zX}#Ahr6w(pWjo8fekAmw0`uv(ZKdKq)~+%q%%hHIiB}C>y1uy4}HQ?W0HacKL2^W&L7WMD^&D3^K?<{BYJ^-3{A&l;j={kZy~a|u}LR!X~( z>wrETL}q!PGaqn;UjJ-I&eNIvX(o=MWLerKq^O${9pcm=&P2Ulb0p;r)$s-XYz=rA zryodJhJ>s$1C&x&bk&84tp}Vm1HuryY1FuS<=TYA!0L^|5!Ndw8BnTg!X(U1bUAUa zhj|I>Wts2@c`&Ivn(mRW)djL@6p`sTmgSl-getIsJW;iU;+#P_dr{Fz473bW-}af~P6B*ahcu$;2!%rT zB9xFyxR~y>nc>t)4%vc?E!<41qkJoC5)wr!j4M2v&=7Gn$3gQ$GDrfT;qgm`!mJPb zJgQoZCHw7I1^}E7Km$d{kJIy((pImZYomIbJ^ zmrr|_WkIfP$~acoZDoybw+Q&)@GE>*Fhr|Cko?+dhz4!yqP46~nbNP}XN|5K7D{ut 
zS{{bz%rQ2odqmN_XNln*L|#J|Hb!>#yKEO?ttsMG1WV5(iyh6efsd*WHEOd0(*(mg z*&?^TTfaLP+wAC$QFRapgF7on=O_ncy^!&|S@nB+3E?k(w*5RX5qr)IUAYZde6c^D zTMFD0fzOng;yl`H+s0(P_ez7l2ep8L^$c(nE-$bWk%S-n@$c8X^Uzsn!oY79L; z`$*3i@d&$8!s{*#8*lB$=UEK6Z_3AfOgMX8OE?4z7=X|rzy$>2K3WHB+-fj7Swwm9 z7teUmqW6N~9jG3hIWJb^dIOP=0VsOnVb^js!1Vg1r*%r9Y`qIIQ+IzAx}LkQmEq2l zn=#`(K|brMmj)kwQTU(_pHP(fnzQxNR!LR6$Sq*k^|rHGr%OS+#{fP3khT#lxpsb| zAXt7zF`}}X-9?HI84KXa8}=;X`4Ly`CTnjYYLMQYE&OLJjXMcRlMPD@O*WwrE(9^q zbT;vFn`-*P)d(0QGklNq`l7pXf`gG8KaT%(t`m%kIJ`6XCw0IVgm``YrkC*XNG=*g zO4JcWEm4Ri#C>i1draNy$2sUs{#ggumY(MedvDe3M1;`pY8kS+=TV#-7MSE);3nj3 z-Xq-5x;ouC5q!T%W(|h^YJXjZg;t+XP_ZBmSsRmelCUg+eV0kUrLXb*m-6 zI2{MC=C$`4fLxsp*OUo=fvuU3rIQ>>fsFm*UXJmL$n2-X6_(x)9(XRa%HS9|)K`L` zp|apNET;Rbw#4Ir!WCvif-`2iEj?8UQOKGT9E5fb-DO1tXf_3b=uRtYpZRy5&eZ7y zX{^OmkF?>+ai(-KH4s4Ut2Bx-;}b1jw{$ivZ+oM-GfQzr$_jx`^OGIv+a?uqbp;enQ$dSaK6ha7jRE%h?_&nt{5kFR zbs|s~Kv_E&;yJW^seOc&j>86@%N_;ao=-+>_`b$Xm>dD#FcT0-Bz;0k3v$iib?~e9V*WbUqH}dXyCpzX0Pqp>3|- z71H{Fb|?`3$6udw5h0uivZe)=;cUEK9KFY$E;J0TKy~bor+OGhMy-xZ?3_JJQ?|(G zMv71pS^tOtCx=~Gwz9*h0{vXTIfL_eM!$?>ydgEE42h#y`M^)6Y^;;tyvi*BGU>Qw z+Ox@y?kKIk{C}njnt&C=6ergRWxJfRWW<#rRlxqWjx5&2A+K9}b|VqRs}U@n3*p69qx8do+)!pcR5x%>bJGKXp`r(jsN=b<#lbfpdpx8 za?^}{+F)RckBq#wf6X?V;@2P6L}%Ogm{9*l+)1jhgk=#_v2BlSO<;JCXo3`_O-1oV z^CkmjATlAvd*$69r7z=+YK6jYfd~dYHtQY#6c=xCp-Ec8zfcmZE@-DHB1C=rY|yd$ zv$5;vnzYuecU)i6WvD4iJz7!YI*)zm!2sJPxOw(x$I=;}$y*GI(~8|bu>FbPFP>di z*)vfFrfcT_pv;sOwFU1>A>N5$Raw9+CL-s}st#G=)A-!R0x!?eq} z*$Tt?Ez_?}t`xq0%j!aFx&t58jVj6FvpY>Ggip3;@yT4bx3|9rI`9D0VFhHHb?^>eD ze9~AteBQ49ff`tSBhE4k!XQq6#N@I)VHr@(sRGRug}nz2qHkC{x{Sbzoq;lbWXxR8 zzZWsB`AGNK=59_pE;uJp3n}wtP!Z1RupwmNUIeIcH%M#Crie-2pZii@mW!-hcnh$J z2vW`y9D>r}?)c<6vo&JAL^e9#rTmj0p}0Rh)F=X9?ekIABa>0X%*b$Xn}spx^OA7+ zp2;Ds(&<+lvxCr|$Ra8~(4H|PDmi)g;lGi0K<6vr6r``2axstzwqTykaZwKlul$XI z*B<-6H!1m&f^^dkV=S!5t4RC#h1T!<8hoVYtB+Rh zc!)wsk6|f-M8WL+OPBaP8@U(GJQ+B$=E27v7;#@Ks>FMWZ5R%FDjpylgN8f~4pEk@ ze?7!dop^{=jolE)npNaPNSwSoA{k0aI}Xns1LWJ&(%lLX4na;nMzwyvY*+E3|L$c} 
zq0TKBi5fKvv`|4sr8EggWjUt{IkE0{vg?O$0q8g6)n+S}(HiUF62Fh~^ymDp#ic?(dnv)ZB9LzHd6>^N>=qe$ zc*YL#@#g?L)B8VOtl0^d(UjNcFQnxnxp1!5uV(DT`g$uRn^>;#VIYl5GewJbzBC60 z%+(~!t^Cj^W$f+#Aub0s3*E3-2UbC$BqHNDaU)p5ZRC9ksyDF>0v8|9LklRU@!Q0fO1IV_2B4%L7%06LNhDiSuS5f>A`+E{OLssCI@DMAmFnd zjcOn|zp7bbkPtUQNJmf!voNuE=c`*52d-wJO%rEWpFXs+E^TZ)7^0UO?a;A&Ek?`B zO9NKYG_CMfowOOung87|!=)_myfr71$D@w0Lb6!>e!+hYd!lg}G%xZ)t=v60lu7`i z8IsjmfspGL3prr6N;pG~KzKR9&C`MvFYEW*?tf-8ZBcSeUjR7}Dls`B0JYKg97qWS zQqbOC;aO&2nAi1@yF<*X9K76>lcenv(?u-p zOm;3TbxJ8v2xTk#G!^WiX@&D3By&wE@&%62FhR`Hodo835xinNE$30t#BjT>pI=}3VN+`X3wAJx`Zyc< z-ov`uV#fyJ=T*kXt{dfvrKye_TzK3DIF}#9gfso=)GL-I6-&QoiI>-%(5H%&#s3u& zu+ae$&HQa>TDb}g%_ykrGI-245Qu9;=RM$UuJI#ExB;L8u>ju9I&0dU7~!5OYNkvt zC&uz$F}nL&Z2*Dp~TXI zhoTK(N(T$4F%3$#Zh1{)OoNN+5jFBK5?d}tOksJ{@OsT8du;?QPkMPV5a2;z7wi<= zekHeU(w3Wm^NbOu&rX0`>Fdc1QKdN=SpA?K;~<5Tr(e7M6qbs@x?EO2fPLb7BwX_$ zl-E8Ytj+xKLvM*5yCmcl$MvNsHylAtE5e+<9KXFC{z@mqNSF7jrfBKwt3F_?P567w zU=sF{b_EcFIYkWHj{0;i7`xBQ*1*~))U&@MOwkNq$&|$xCOvP7D>2vbWLhZ%>M|UN z{)>IZR0+3Y4@E&r#@UC>s6gl-cC@r-46@e_qXhjzyEx8n>;dQ7VLnsNhRVGk>DBXh>Z6A(SFtJ7{VbTX4;i$*d)yrb*zEC9Avy z(?m%XcBvHaLg})>LvV}NZ~1oGdh?OBkg1E z__qO4hr6+}W%89<+_704H4H#3A)gsHfS>s7|7!dods>pLX|)4CP=dXEEmN|aIiso*V#^c|AyK;?FKKv>0V!@ki*Iil@naL& z<2f9j8$q$D>tQj%&iS8K;GL!rpnL4Q{wy1RU+kwRxAkb$vmwFuD{V*y5DKwOQa{+x z5l};5zbrDOTcQZ$Pptqs79{cy zPu7Dk6WqiiD0B4ei8P0-Jn%+~g??hpI6C%h#J60`ve^Kp!5NW#sdzOv;H={-i;jk6rB!34 z8AY0ykZCNa0Qo>t$~9i%U#f_;jux~EeMunTP8_!2XPrN+uBgk!U4GSGk+T9U8HhR2 z<}jVS_M$H(q-eU;sfi*o8`$Fa$NAZonE^vpfUDd9AwT|lmI_TZ+)%yGol(TZ;@AsB z39l2-@e^8IzEWIboD9dX{|*X|ZJ9F^y^A|#ODeM$S);H&s<_XGG~sO}rL{1S4Zj_h z2><2-eVSeexVL094GIXB7a#R~65MHz#d>=Bel|#i@?DIQpge>qqe=&0mHeF<9p7yI zD8SyOqjckvK=jwuZOMB>$n1j4WVVM&?ne@hExH3&Vk?z*z~R35jkwI=q9H|R5QMQKS-FUw2{5V#RhJrPV~*7E156>1Pp=&BtEF;izqK6 zJVSfjDL>&{LaGu^;3bTM{O+Y_aem%El=a6_P9(=8vWyMi zGU}3xeE}#n%F6SXk$0`Zc2&Tda??OU=Ahlb# zT=*w@o%H;Oq5v3y1pKm6R5s;jAu&x{2$}!pkJrq?70AlXGIFpV){yVO`NN76XlA3~ z|6E7pp4?_Lk#-k293q7%2*b1t&;we&u9>AmE+#t&5YMMIg)DW1~I}yY{&MFo2&ng) 
z2Ims^Wbc=ld6w%N{=?Tx=UZOVy*ui_6OqYRlL3#SsE`%NAYj}|KwL6Su$jCL!HFWb zhJ`}k9%1Qga(a3rPxDS67pi{Ijm%Uk3joj`j#YQ9g5o_w-6v(d*^L$(7 zFF54U9Jt=A#|w&>*8-ji%}v|zu>=f8FUnanG@9EmMKtDzzmEq!fTbwHl(M9*1xUDim^TWfHPnS>Q5>J#~2| z)@C5bA1f_$l8?;|z3t!?h}pKDUY^bTir*259;(VHC`x37H$PlAznj(ySFsL7#Y|L7 z?lld!06y-_UkoL>M~Q?b2gD#Yq2cu2Fej|n%K-epdy_(Y0x5$8kI9wS<#fHx$G0pT z<=O`JTHKqqW!~eK?>KwmK?ncb(rzVP8q)b*It2*;kGTT*tSC#8dn)Xe)|8` z-IT!oHi=-^DZj*`Ao+owwJAu4tw;LvR5TH=d`-Xf1<;S=uev5##U zm0R$#oBVWrUS&iwT@*D&o)(|2Z)2QKUkZ#8jO6`}H4o*d?JBO$e>4RrNj+aCV5B6G zFA=o_|JHj&s;<&CInEuQE}{33XZqSGGa((vx~9Q;b=FZLlH9n7YND>vkxzxBG0z?N z3C5+(KFI7?Gk&F3@=1O)P2&UPM|L{CX(>5nMTBO7*n})cNe?VAzZL~HdM!TR_^&S{ zv!Zszf;8zVTKk?Z%h6aRr=M|fA=Z6*p^xAXqX{Gref|OhwNdTM3FcCogtZbYb~H_f zZ7dyXpm#@WgG=_NYKsdNG6bMxye_0wfiWH^RYlLge`x0KH_y5q(i?3$0g}a}Xl^u_ z;KM=qJ{xwu3>Mk{`7RWc{b?f`+8@5h&c9iJWy<*o9H=!BPUI#IghiNS!tD5d3I;J> zcT9OCW6~Zx*P#S~eS-_Y#6G8QRA>wt z%X%&5Rb*Jz z$ZG)XaZK{qmJplrbQ{{AGmB#HjCHtq$cHC7R34tUO9&3I3L4`}Or zPjmXE#P}(Pyss`K7?lP%(^+Bi^C#oV& zYk6=<8ai6Ix3KA^&Uv+>{+R7(Cdhky{7-SiG7?CJTKZ-MvTpCaLn0j|5wnh1)z^dM zAsx>*!!S&6mwLqT+YsEOHAk0n)*N|8TCg$iuAQ8_kV@FD@cSz8zmh`ZUoNYC9CDMj zq?7Hl`qF{*xla21NhLgoekdi@uYkPh$b(}miIYMK@{tn3WPh!^0%C2<8p88%r&&QI zu$d*8uXG7MDX^5nroMA=Jmx^D@n0ukW(R|u6*uj%KA~6~n)1sn8CgpYT3)_uVftz3 zemdHVWT?`&jVw2R0_v<61aBR0g$j)kK;{4Om257+-F>w3aP=xxR=Tk89c&gIvU_^| zQ<1*d?|to$3&}i8K5jNzxJS{Tc?$#pnf9dzhtlk^jVdbv!>IF_31R{;#D&YQjT zTr?q8&YhCGB!gM8vF4#7iCP9CA>8Hx3L+YkADW2ByyvKC54bbI{!zae_ZLR$oXVE$ z5IYXR2)@c+*O*jkl`e<`0=G@{D|E zeStWLW25k~6MVysE@F`>&vzQ3|7D0(qaxKexkZBp8$adtH%?M`jwpNq;Fj^020P5| z%jGRmldkip1Z?T3B=s1}#J7l~7nw=$1=gEDr%j`8Yaf#m)DHCo2@4)Ba`kQsLPZJU zQo75GwH~|+e2g7f=2+StGKf-|yOG|Y*~TLwGW8Jr;LDOe*U zzQmTH^%R~iyL@ml*N%^*1Ul>LR^o~3Uq#MESk*^c4gMIZhM_~kaCMRpY45GxmVgq9 z>K-Y_b<13gl_QezhVkGKm30JUs+}CiK@P32wv3;*6(lcZa|+8Ao;EhtPbw={X4QXC z6XF%8ueY}C&Vk7-bhO{wvXs>`H6$@Pg0Oc<9ntAtM5fd(?&rAB01sP;R2sA;sfi+~I#RMzSW21XX&n z9BKoi$+zC^$eKkEjQ@77ZdmX4@&y0Ixz$L55 zYG513lZ{9$QuxdPukBmm3IHTSN`?UHGGi_wK$FQn`Ixyv*`7L>!&{=X^GpF~i$@Es 
z%4GL%otoUN=g^ryBxQw;=#fMgdTBbC?WoeUvjHhzg<7cLkCk$|6~P<~j4|ji16zop z#nyQPi>HFiE#rj(K!)h zjsl6?jXQMfrB&BDi6C+_%)DEOvfc6-qsevBa%A|1e!vKj7INFdM`6z?$0rG5^Se#} zyQ%1O@YA{}$GO~D*JLkLo(LzzI`9GqdmINaaaF~ZV=9ccK0iVFhjHbJIWgPel1m`w zIS~4kUST3*J@Wq8W7%Wyf|ZxCzg;ITX-)I7fYq1`2gk+Vi(-fT!RI-bA~&R1B$G?*J?NiB4hG!mx9~9>;Mk!1?LKzl(~ImRr&I9FtMud*SL+SsZG0+7Mk7x~p226uw=`1#L z@d0^AJ<}h$JDH&xEWS-JBh2^#cj#AEfNN}A_r~KZz`AkumZyXJ4R&fIFHoyx2!l}- zaLWX{(?W7T*M|o2^ji3hwrP-+R#W>CA{JMl{2B`$=72$_Ytn!kDJ~(IAf{w^y`49y z*|X<~u>TOgR9kB8RR^8>dEHC{UzWqu6_is7fF}O)?1FLEZ+2E)VNmD-j_H`w?c=uU z(@f*S2fXiGea~(t<-7BsJSlv90F2Di3qj{=C%F1Av_JL9*l&J=++FLdNcb;&({D`O z{@Db^?t}a5p}xzfUCj9B2xIZLs@G`pijF$$=;xBvy-5a`%XK4V|^F+y`?s>G6R;p=ABhLB~!f=!y9 z(=u1PD21I=Z7&f)M-3xwRmH4#sep}}Kuo!;rUiWvJpxA}?dsZh<@0QEt% z8B=K6tM;PNXF|0W`Hl%?dx zJjsrU!6c-1OleKZk;-++OU1f)LNBhr^X(c%GB{<$X}#OvJJLvdc(J5Lb)>C9OK3I>KHhI9(dOL&d5FoILVKe0 z)utQ9rM6BgL{D2%>DVL>a(dMSzc5$0lt$58kIN~k>T`RQ!a{V~?>QOsZ6~K}zIS{6 zlJBfsl2lQ0Y`YS;I^8dON^wKKHZ-?40&%6bv%SOg{docNLt_kQ(gX;budYo=zthCN z6Xj2;vS%d5NcDZ$d4Q9W=SVzL8+N4|2gA%!2FSnO%=>WwBo`4wD)Sw*e?$52uwCeb zqH8kd!71?DAJDdA`Q7Mp?z7poe%=)91x|bSrVzfP>6=}$kDjU6#evRg#~IKFnh5v2 zVsLRsVaASvs)nD&b9eyZftJTj3aXToAst1v-9>@$aBq;lt*Gb6k{tu~AL%iUD3aws zLK6U7oZrV86h~KUm8k=6lhuQdxj(f)*#Tmn$-n?|U_E&W6ra^!tYytgI)o&sR5td- zMBw@mF-P4Lh6ZHffIQ_yFBLH9)rB|Lgr$6aUVQyvKgigXW`^0hor zlAiTw);yu3eRt3grk}ZD)LN7-8={FeP~(w1=t?pS)hdE&TEx#!lsV8d>`_+MoPzY{ zKWn(RTf7gS&qG&VFZ!T6e;E}+t&UJuxv(JX_pBGX>2smdyw?nTnY|jv-S8Rm@bwJ? 
zk`{`U+)eUCyiKZ7|LOmT(Ih~kOhKnaWI){q(NrsGv*+t76CXZn zX$8yr_m({y09BN^2qg@;8K2wR0D1d zsJs~sCmN0^9t%B&NrABcKmM8E6f>{?z~NI1KOS!k-q-J{GA36fp*a3%IR&%@GS1{% z9l5%-_`&e#Dz#Be{;n+eAs;)(_$~#?LE?#c^Djd#2?(kfOAv!n*rh|dbb~o78WdR$ z!q786DOe_=1?xI-FT^S!Gqz@B%#-ODW%3Rt^ODD|$Ef0n{tCk}&%LHXbK6&)<~R`w zt|}dG9HHryblO7xrs1oM$GJ10aC}m4czREmNqNB$s~cQIZYnhOR%z7KE6r~AL+uI0 zWwBDxS8u~grr#gNY{>tndFVVaQ}CtIJ9Fk&%G_uphSja_wYWB=@-e3(b}u5o1F>6N zQ#+Sw8tsdh{0a82W|8V7L@r$IG^{)<>_Ea3i+N=j;_Ny&-=2Ly&XqWtY#Y8Tq~Y5H zTi3xlPsnW0GC_O(BYSp3{km@X#b`j@0fh;rYUJSBC2OF?`tOrlmTFsW+o=v-+Ly$? z=Z%!8sCZo7T=);@^bc&;rDt*y|6fv{gmlrPq+9ow(pW|@{|PLBJ6Zycq-_ET zy!s0E{2zgKXvZlm=wLT+!nh$8)LFK4az;JdG(7H)SuJdTUe(RspON#c0WQEs&$5!~XZF%n;((S8q(bP6Eh`V!6CdDgfRD@bu`9m%VUkOv6OxQcPzYxY!cqU;J`PzGP3 zWMb!GGCXAr;OfUQgUShg*n05 zVDA66y9^$qv@5vhuTag6pqX1f{aDF`J!4#A$_zxbdntcTDubld z36xhceXK*(saDokP@3%ti76|QExINWnIYxRAZ8SgJK9a(+Cz|> zXTWp1;COYDzxKa+f zah@%X7S5*`7L~|G?g1xq`0v)v{Kd-1hWF_{trDEMkE+O$q3 z`Km;I0cH$IG1f2lX8cgoZYOHA5ox8J@pm(Ih<|b%q{W4apo3PkDMDoBlK2oL2LPv% zE!NE3`=38pQE2d^wh{E@bpyCo3nl;ovb)VtDi%gS2%PR^l8#&nabV1ocTSZ1AISv+xpNVr3?EVSmOF5D*g&i;Et|_!{f_N{#qbZIf*ymijD`HQyAUEqud7Bfy z5Q$FWZ6K7K``k(~Kig$>Fty^z*X9=&0MJAdgqe_UlDy=ShPi7NK}i#aePvqT=*jKW z@vKIBFVWb0WJt1;am@(%Jyz#vZ25|M)Z^xDeV*Mq@v%>Fl(Dx{W(0<5D)kQ*GN&MtaqhOnrK& zX4lA_BS3IBo+$^5tMK;xzwS)YBh|)19}t6_uT#=(B~?}cOh9cS49wPI=;D^37&k=t zQ(+|;>i4&ikzmM<69U6;9xm1f#yW@pvwdSwUUV(l%Z8_+8O>azzY`3OR`abLo0d5) z*oLBCu#8-(7YYc))*cYhApxpO-EAYSw?p)JZ#OvcI)0E`a~CDWuJCr!o5&0>m`w8V zZ=uV>_F-}YQgo8raA^!gg)PoQ!B-`iNzv1!E;r!OI1^Ez4hZCYL^t!ZDgk`%%|Yf3 z1Wzxp&s#4l=q()hlAJV}8H0hz6D+#SK-MW0UB*(IOlHB{MZ#jLj*_@gKk((8!_`1)NX@^!h zZ2|?86MhtD%)C_rzb7SZv%s~0upq~rA7C&?SCKocLC0Ur5M+3tukf3$qXq%mX_K4m zgO2%^&wdIhgp^V^?`D4(gb|@3{b|&?Zqi@O?-7pgpRTQ~qmdsLLIfb+f!+rFn$M1^ z=)-&Ik8!pa5R*2^vyA{_e|)^I55Qg6O<6<`1Bb)c@gnN5&`K{qwY-$q_C_ux*5iig zhChJ6)g|n2G1^HKmXF_t%~z!(hiEqgZ|&a7$l60*ziZO2vRde&Ur{?64X*s2e~Em8(p7wi^L=m;LKJ?f%A`&{lo5<={?l|_0XjEipY%ZVmLXHtjaVzG#T(}Zsg|0AN!pd zN$P$D)3+xERVE;WXe-Jfm#Om9tg3(G7}9bv_D&g$$8o2 
z@DXdc$fR-by+KX)mwMCX#6m8{@Tx=H?j*N!#lGjf*J{!5XY!uljjODCR+JMT%yffg zCk=p4Kofr(^X2$DFA!Fr-&g%WtiPCX@KnM}kQNa>S@Q^giKXnv@{;s<#5s+=<8fS1 zhB~1af#mzKy#ZA~#=T^&>;i%TVR-zoVs>S{>Sq5RL(mqMyqY3%HWr+tV&`r&5NFGFhYTnQ4S9x~ zBN%d1AjPJ!Ba3al z1crlkH${7kC)oXAIC&k9g7>|X9hU1XX;ciWLGTeK<5R8_P_#q1y3vx#f3od?i1=;t zBwH5Cy9P}EGw%MTz`k9xnh94}4lb$3Dh9iZRrcJ_x|O`Gj*9OLGbwm2+&IoM*X)dS z>!&wI@1v`i7o1=Fwzfdmwc8uX{d4Qja1(H|TCWJ1DSf|4+(u2PQGpTAL0Nb#97wduUY1)M_1_dOh9LF?1dXT!VkV+V;F;gH;$= z;EG7h?HLLmgR;ejNOtk52vEf_J4ZB;+pj~{;)T*M(&xrw}67=3{p__ z7w1x)H6NN+fv=J_@pl_6*bJ(g_8n$lQixZZTPr|731<-=J?mk<{8k$hM;4Je5~d`v zW+bqy`Zu1jJ{2`px8v!p@rZ0ithEk(NnE62cXA3k5g76Av5;gTc-a= zo+)xNKE|_yxT`p+L0^}8mis5bzD6&<;@1JrmY#PEtAjJK9h|k(sS~KfhIK zQd7YR(xhY35Z0agJz|H^1cH4dSM!Kn03%z4ow-|anz?y;G~sYa&h;S<$?@>Dd;GrP z+Bqw67`vJ?U6g`7Iv=`XwrAwMd!1EiWbQ>hH$1GrHgPY7Gr3ns@8W2FUW%u&`eQ*5 z;V@-O<1q7x%Dg`zIz`EQ5lq1M!H_1qErr2{78ebR(e3U<*#_u7 z1HnP$M1$w9Av)&U$wggN!4xU246)=uWL>h1XB`GvkH=65!fv7H+2Zs;A=(Y#IYB@F z9@JeI&P?eV06KQNf`XK9PCYZtwAQ*7aBA8RLUeJNs-pw(bd?ox3g3{fE6aCl=!9i) zk}*7d`p-Uf;{sN4AkCd|7CB@5&LqY!u!)5oX|+$KJK%XnuzAs_RqWDHyi`rkBhXLg zK2*E5R#0jvJMs`3$0C*AMXqr zX-VA03sws(E#qc``MSseC%uy|<%?YJJS6)WrEcSjJxtGyoY&5&XHB(YFF0Vr`$e14 ze=CVStG&^X-JnzjVMp8v8;JR4>-}5-4T=3r`OxKdAZ(h6@l_-Y1UaMe`L%Y zo&nR6JKX^OZUC3YTlYP0Sb9l@dH`fGn|pVTR3xYFChqW5!M8Q-Cge_#hO5^>K&DvK z>AZWNX*LwScL_2!dutRNsXGxoC-38Sf{-c{U&4p89(#+dQ|<&yBtcsnM#hWGlWuiyGR z<2evGtx$5dOcRLH^Y7(t%D{4($lI5^@TaEznxJM)UGjr7s5r-}vWn#+qvS}i>VF^| z>ISRNF9skSC<)I{;Zcwgyx%vb^Vw zY@v`^Gw}iW79wD~xlzVyQ@d#p{(y8kz$h**@`b1p!VC~j(Rw^u;#A{MsAWtMqJu> zpXc0Fa{rt%e6_=~fqaA($C8m+gq?dTBu5TN8>a0YnIB3s;<^Y;%^v{@>_3$gKmb4* z>7u}m-aivYTRi8o0?g`8n_tp0NQ$ULc0xZ+cpewPK@l zp3048aj9JE_Q&huHvW<+)|rQ!*#}z~%JTOFO1Pw2fH~$a2`TzyhFeSb)N%GccH05S z9No&Nq@rp((B$C1_Hwbq*Km=PU1Myb+tmwd^}^XDW+D=4635-He9XqX&XLDrxJI2P z4cPpvKqQh|tAB0$CCW9&O{)9XWfuTg_Ao?2@1)g;DD2;MY1T!J6@V^6E_YI+;_+pL zXq+EwC^8(1<*wMYHoYuuki;uckh}W()jsO<4suynb5b;@_8-iwiDwE140JG@2_ajCwBBRWA`% z$K@I>EJ;F0GqTTYU{Vy8v%K01do}}<2F<(=N{-cIC@HYW=YNeJqy!B2GcV{XR}e93 
zy?|QAvteqlBaS~EdqxrM%{hUU%d{P+iTp0PC1EJ|kFwEP<*a(mffHyMRoqOR4TnFdg4(jAQ~^N-Xu^Db-Sx=cn!A9>xc zP`Y)bfNE)H((*4p>H`F&<<-47gqRFfqc$gZ|4U&mx}NO|So)1aX91KfWjuSxxs({a zzEo|q0I+~h=ypD71Q|$2Z>r~B?jA_Nu=<8|PsoIf=*cddN@z;_2Hm?qTy93Wj%8 zdcuCvGTu@w{>-Mp=Gb-w69fmt&EZ|%kG(e|h~^OK?u?hCIQ$j zsIMVeQh6q_a_27#es=26V{xIu&G0-~Yn4SdCBJK>atXCTwo?!Oe_E8Jz!6TaoV)by zZ`rs2Dc8n~?Lx;KPL(o|MR&B>z!*XCq0n4=%J7(Sc;hk!bZH}|yp8f!3*zu=B+?pY zTKpST28rsK44rV!Oq}DntnLENFQ8^|Vj7C`8}MWXPjT)` zX>yCZ#N@APt&`t5Kk|c@&C-C!K5}16`292l2(1Q$S*(WE|FO_fkCd-k{_-P?js;e1 zKs0eyT^-fP8f_1_B4L{b!nVYat8NF~mDjc+e)MHAsCX81a`E@d-JIa#+hXB`9R2TuHM@pfz%?}aPrC$gFVx@~Blls$WM*40&~l`+_4H2*>k z#e^T3Zu30#jkw=VE&%M*bEubv++za zK_QBXBGg=sezBNU8wB_*!}F2UHr4pxO??R+<=X9Q1!*qt;_^j(Z(YMx6goj^I;-S> za7ce|j%dwvSr7(`s!H=W+_1Snbnfzc9BxcI*QsUO(QH4Qf5Yn)-{vc7vvfXsV0~}n ziO*!52F2xZJ@t`AUt4q>`J9bCEenV=?Syy7cv`SyMea1S9-uTWEa~X zOzcX)Y-YHf{C3~&zj-PA$`U6WVn-a|-~m1UfNi^+R^otSA?Xg3{Uu z_l0}A6D?yqb?2jXvFNMG9T z(l~3W-!czB1@gTBwm<*{sgsnBoOeVp{`Ieo_oWPDP!)p0mXnYnDi4ml17qCV_9nGG z-;2a&vXP`qPB**8=^5XOY53 zJb&$bPtA^eZC+0r(H(&!{W2X;ZzNgB)`z@J8v7~~ghR@j3f5UY&XEGmQh|NaiW4DwTILZ4xH)alInr9 zX=Ly{-&Lk)>YOhNr*zY0iH8XA`#VCKwwfUV#w8-X#QfEbn7api;*1;c6C|`oGNE#I zGQ`lNx^ujo6^N#l&|1lbtqcqt_uYc^Q_;R4Y@J8JReH^=(_;nKlN&=4UiKhBr>AzE z$umXV@kmNt-EfqfphjcCwN;I~x8jWjH&|Vd53~!R;}g@q!;AN zI9z=vOZpPX-T%SX^ZY{_Q<@ZHCogXQ>g226Xd{gOAT+Bc}j0fXMSq`Q9$8HXbI)X%{2mD>Z^*x+XVuWVJ zTDQ0Z8T-MEinj$MIz;|X+9@UMt$O`HzF`)I`#*%kvcAx$^m6NqL7x3UbHxQ#eU9T$ zOzaOtf-~H{17gv4-PA&dl?|463zTb|hZ=&k!~5Kq1+MkfB-RrO^3IX{?ZFn&jT8{x ztwxfl=YQiP@A?NuJR5#k zC!_m4lT`=dv_XOGVe1ron>~oJIY({vpNaLQR@!4m)4s>{k(-Tk4n82z`_D?R`cz=; zVLsHro7XN2gj8bkFNWM0(7BIJM8H8;W}0m>Pd_TTh+G*YkP|v!OKx;HXMshidZ>l} ze9s=;A)`0#9<+uYX5UL6Sr%0RQD(0wd&W_drk)`jTw-}+p2&a57+{6o1$%!0jU78A zJ|$cMSE}i9JS3G18q+nF!iLKUXmCRmvFD2!DQ8~&o*a7B<`kRCo|ewjTW;%Zn1*9? 
zK<6YM@)(Uyu6TfK)1nHW>!xIo3glzYv+ZldTk~0=*2ibbKe>Zm-%&BV7#^b{r0_RJmub^L% zFNR6T;rsw!%FSv#7GdQ8kW~B05AEEo1QGxQ2EN1+kM~0M^Jk>U#faz>?kiGRFCR+; zJ!`k;phv};4@NKt+Lf!Z8v(gQ>CVPuktJk124~jdl|Du zPC!DWHqS`QS#zWuKAJ|Pfb~*Gli=~rBpqzGRGV8Og936B;^o3aq+c3+H9{mJleG_U zX^(lgJC#cmf2Fpe5f*e>xMfivPd;Lr3cs;mh=WWbxUZcvTUBu0+#3qAV)mNn}2%m3k&x$F8W16^xyx9cddu>ckkGqaT-vq+EpmB9h_GL~wY`?P`W&)0vINS#vEAZXlEl7+Q9IiVx4R~7 z6jW?puM&}L>s#m}VxamphKgkRu|Vl|rDCb_gYU^9J?;x^sy&2>HUuTHu`I5RIsu~| zO1(gFUS?jKmNHl>38hzV5Izehvqu6Ftmski0>%t%{IRAj6N!*Oy_Y} zvNa25hT3xaDT@h;HbWUZtEEVNFAXBgDz)oZU`NKtIotydbAA-+I-Fy9SNu)XWE+o- zp@XZNPkw7%AJxVv;J?4;i2<+W`<_*;fIr{PLve`v5WwcFcEu&$SA1BA1MHkCTv8FrNS#wmG^qg!U5M(Tj;GLc-fD=|1Ys?|`lFPDJsD*jGLq zruP*pt5#Z6O62iD@m1%BXzY!xK=iSIiX@ezO3m*~i(N!dsv=GWm)3ZVA}IFcO|~eI zV?PR~Mn%(7QmE@ntZ6XKI;EUV|Kd8I7AI)&;DmXLt0i7)dS&LeRa2xT@zJz5AN9FB z1|RVBwQkjNCf-0Y1DvHo>x8D>g?OV$ypaOeoliH}PaA0AneN)234+&Ou6+(uGxz9Y zPK}2pJvvbWeWwws13D*0?-jSQxwI=zT8fL}FaEE-+vHmRYP|HRae+J?qVl&=2#aAg z(0#Zmb%F$^%<@HNb4WW6JoKsUR%Q{;zE;>9hpcNGc&m(t+^=~Je)vXJcPK1yq&D6p zp)hm5D^Eps7D1|OD|)ZIsbwAm{Uvn%&IGU&J=byR6T}3*+acJ$?-EKn{N8i{smzx3 zCkr|KqVA4Tk`82}M^TnBbf$}Y0bhaT2zt^=Ssx}fFu42uvTee^sOs`a&zA&Fe8f2y z7)qs{C^(BarGWCZ?qOig@O0EM*99&FI(VJO>~K(0i{GU0lX_+=!qh)OIGC^Q7%2hd zekHSntMMz*>JxAQyqIC3?#l0M??v2$q{`wxow|B$CT&E$N-VHwajXi+#X z`O(i3vnCn&Z&;gueGO+0zAK|)sZ2w@2 z2bTFwvkeSo{bgFf(5m=!PD@ht+2qwqS9-Mw7U!)_zE|;r!q5jN*tymM4Wh=j%lOJ# zaGf8`DX`|~ftGy}N%nO`?Aw5d$i@^3yNeD@iACT?urqxBbb z#lJoDnDw9i!QB!%Z?pX>(Slc(M>k)xK@uG;^pj;=D+*HOodA9S)cQleI|9SkOO5MM zC_nOW^dL~w5^kp73w#j)!Y+Y^GsbE+9KUk$VmYrvz<8<1)2 z+_2i6taa7DdLxJLHePv?U0KIfQpgS#E)%#kTzzut7?d^3H;eGSE1&ofC{adOy?$Ua zquN5Z={W@Ns{HAD_(wPru^k6ltrpXK2A6$+zu|?hx2~T2=5o#m2*+hHWm5}gj494* zj5_w`CBqn&b%{!_B0!;WK*QP<-x|_^B+TnQ3oh4Jeb_e5{*l zcQY-g=JduzI-A;$Bn~vlsNnMxr?t{07NyU(=HAYEM@uq;*}wjrM7If*N`*|M!#1+ zweU_ppxBk!XTL#i(A$WH<&vP9U2Wr(w3Y`%DqT2Kxu!Mp8odG~m?)Yo{4}c^d z7}n}C%o+8F|Ms)nV~HmeJ|J|fTRi3?~b0F-#_M|dx9!7u-iS#8%9WOqc8sFPlr=b56SgZF7jwz{ZcUml(7D)oo^(r 
z`f>mx=$|Ztu|`#tdh7d`JCSYrXD!3-9rl2GTU%=PVYzD63u`9E^?mp}Q787i4PJlX z^m!np+J1ak5=kS`!&#MnBu9_r={2Ix)VQQuKtAu3!(Y0Q_53?A6}{?|GZnAUa4_Eq z`bB!RAg4EBa{SNvK!!o8{CC0fL;+0MpXJN>N+M@BVs>al;N=<9Z@;j#^1=k@n!s%>&l?QF zwZ97QViPgbcxw{*;0ve@Jv&m^MyLlO5@j8Khr*}J9Sw7o0hbr+&Kpz%47p{QPCo_c z&O`Y5DQX+Map_QTG_ zx&OZBvi7}6fIg&C#kOPzJkByKK!oG}#kT**Vb3x1!zA{t0|6R(XB$|bifA;d##oN$ z;eVV4Heq|edc6}iVnML^TPZu&=Ow=j#d`pTXdiL z{ZW`LyCQZ$m!r?%^7~wO4gFtcwjF}NJSYx*HW<)Bu+}sH(N+irTGv6Il)?wW%Hz%#@^WF zDhT?I*MZ1%@^^fhcz$J}O%#$RC6Px?V#(3IrNN!z!LXnWtEnAkZcJXg`qnsSru2(8_ zzyx!GgZBaQPD?X$jV#%dC(8n2yE8@YjuIpRaqMFxI8%YR_2%*?FhfD5&-|w&n71ce zkA1LW0rZqj7o~Fge&GosE1Tj~tuF6Bdl1k?>Vl|on(~3HT>WB>{_e;oyD7yzaLn$w zf8PLQmV&>y8iGe4Gb81Wqj!&SGc)NEp8#uFh6Y=tME5_b9upaE2==LJ^V!bBXTXVIvVLrvkuytS>3@Z4KE(Pn8w;juxI3=3yVpA3<|GH|hACCz z%|+1o7s_&?T2p&|^mN2WRxnJ_p=a0;5=BGR{!l_^&)Go@*OFBmAePwn1t7smUz z9K5^A_zsn)Oa)i}Mr5zuZiGMLv^xMy2cGX#QpOx?U4B7P!^7^y_NAT2sbqbDz39AgSl?`h#Z}BRI|2HIX!(AN+SA zUF7CZ^Qb3sCtt7ATsh(N2KwD|Pd>NqAk$x#Bb}T8t%@?u$=2BeN3h;Lp)OolP0HH- zClS+aK>7H+WZT}8ib)L^NjjbpPwSeR^^0h@r)dn-QGtvXxV9f-R*{wlc9dBLi!*C$ zaosh&_8;&zd2vRwISXw;QWNJ#-U{lPTEP4`hxK@%XA~MPHfN5CXQ-!ShO{Xux@ci~ zx~D)zJYjlo)*dsZSH2>oA~|{tQw!C<>B*!N%%O-pGPbqDG}Mc*MDw|$YvldNy}zfF zy897U=&eXDHs;55>!n+4ZT3;z=Uw-L3j_#nu|1y`Ir;D z2oNt=8SqZ+cTqyV;p`RPVET#s8~yf~n@xe&k>`i7{y8SSkVXLxwY~XNM=q<@y`fES zs!@ipR}nU2$B;z%CJ-JpF0H`i>4%#1wWgNX7UH_Uv}&zwk!HtH7%Q7Z88xS zk9;z`b(`R&l=kyFO$`;IrE^4(4mg`-fd|-p178@)W<2H~py}>VuJ$1<;aj%9iKIiE ztmKt_mVM18`ANrD5XIAS4?c*h0(m;}bZLlnPwdllLA476R;JDwK*zQa2!))fAU*EB zL%Ic0AT_;op|~poAH)FOcZ`=n)i-h-Ht*V$LJVv&29egG%Hi3%ndM(4Db`esQznxA zseo+6C9`O2@5tTqWWymy=dDxT`P#9ZPg}J};-6^*f* z-(HOwREsVslk#oCx%I4%v>?d&$bQ}8S2`h&m!Gv!UtRhJnI%Y~TU`NOKU^9qu{0lR za5?tg_NXh#bsL@N>o*g~j(07v@tZZXaA2o=kNCP7c=)Y!iGS0Z1noiQ=;}WmsN8E? zp3?kCSwXx>=~_UJv^oZVhjZqgt1w=QgyLBDwx*(*XJXL}{=w+8@~V9LGL$%5i>iBJ zpo3d6l7{8IM&hp~1!LW#^u&{?>LYo(EA^_GQ8pAxL`io3F=1hMVVZO+=+7~F8mV^! 
z(qP9cv$f%mO*c7pm=XdSRd#^J2nf#HH#Y{Uo_eLQ@M`r#5CTriU<_EA0F$hK(@uTW zuznP1Fd|eT0pVM4oS+v|gsW;qcJ^dv+|0sgOS~JkI%*T5M*_jK9Ac1L1od3B*ZjCA z=-fvJnWb$f;brvkw_A=>>*NV^>7aW>=7|0HF;Z+jue*uG536oly?St~sv}rVBKC4+ zhzdY{)HP;`_j$TnnhKh82nPvtVdX&{+Ge~_9zA95~y0CyNGGZ>m%gYIGoJ#ap}*?yciJtFz0$ zG}vWQxt#eQ`Vt+Ux+5K6itr%nuL@QDfuGkNs&l(n^%tCEl2qE_`)N8q^`Y>T6ygJv zAk6R*YnXkCT(wXd9A3@Ka3vlPt5RXFdPb5L^+o|X6N=74M+%E21&8rruh>5CZw20wJ?S5Uxs$&@yf~Gp6O)g-jhib%JOgQnyAl z6=9SBox@vLTFrn88?2Hg83RU>~?Qn5s4r-|%b+5Y}b=&j%$<-3-ud zkU8+n_u$skE!XWtzlC*D@*r%B7YAiKSjS(4yL+qw|poHhwAVjg(r-CPBv3+UdH zIGYj$uz6IUjXv1V49|SOx|FD^TU(BX4DjP54T?y2G~$Ec%yC0=Ok@oHSA=0t!y+lk z+|#k3A6zG-YoYojq%B|f9y;_F&ly}a|BI>S8m4b}|Hyo-e=iDh33ym!&R@pO2?VocofgO(eTeTJ5uT5jF*`vblXZ&`31R4l$7ggRc=6S@~v)7$Z zh;` zKji#jrT{4`ry7DZJma077T1cZHZh`%$Z|ytWXEc{Aqq99>W2d5JSP!Jl*YIu{7aU3 z1E7tcK;Oj-W?l^?=#xrFXUC0iy3~JT_euOT#^%{POng6q<76iT4pc(6fOE`mq+@n# z>)^k(3m*Upui+m@N`bIHO+Al-l0;5InqNe)5edJ;^xe_ecil-fff2-NI0B-WoZ^&w zU-g%jVS?9TC_3j{XsO1{HCzR2vwH$IpwNe ztq-8m4M!fn#t&*#lE$?=I{IDj1%ysj?)yJL4_^704Fcg)Jb; zv5i;H7igd&R>r?f;pdMK!`AjI$zwGR1cvdo(^6)5lmh!z$t!s>Ty1zob-0sBBSc95L3acUSgMku@*Dq!VCQM14ZJlydR*fUNfz38B6*x zwkxUZF^VrlWCxw`E%r9AYE{z#d|n)fkk%I{EQ)VLHP1@?T5(nM*^0h2X@{1{#N!0$ z0AlDMjG4Aj0(A8TX`PnrYw@Erb~_0GL!SE(<@(mu9>P|;9fJo2K2M#{B${FaVn!SW z)?Z|_YWGaEFSqP*hZM%*r$ei0nEm`#UjhKTY@$g9VrTOF>MgBpzUlQ;Vz1Q|p8GVR zSp;$xv)i${P1@LY3upW+xII3K8$KHAIf{wC1A@6MY(%{C%V4~q~;K!EZNH*chj< zxj=^8D&$kFG-&60enI%GAihPQs)5~+Xv;aL$JP72oTv=CjqBY3w1U06(&=4>{205( z?_JE?g4iPubt^UAUr_}1o9#$5TIbG8vgVH`t#4t*0~F-&!AfD+=9RFNdddrJ2!q{~ zXTED}z28J4ib9TWAH*!`Qa8>Bo;X{K=E2^?(%q0h%XJ5GLTl2m4R-{iy<%kw*D({P zQ2bs6a}xT|y2v?LX@814_CtJPL4I)Vb-aJGV*0?Pm5SR*gif7gEQr@fA4WnWg>_+y zUZi5k76eVT*<4KluymqrT4Tqd@g3RKP-5OG-JD>T*Bu!Al)CBuw_3S!d)$tawzhyN zS61W_T}juxhu(G8Yy|QvgbZb1Mb#&2+6@b^K{&UUsowq2b^9=P|B{~~NGn^<5$VU7 z3B;YiEY9{p08svB7+LzhCRh(ZW79N`qALB-+19b#x+2-Y$q=KiPWqlMrqX@@UOl`1 zl_gT;xkqEdEw@+!;X9@4h4+|1p`0HFOKG==JGN_xzgb<67U7L+_IY{r4 zH5<0Cs+ix))TD;Ia;Jq8=(Fa2sb&Y>4v(Cw&pMR}HVZz2d0cie#~WQFmb7qpmRUdP 
zZlE0vB=vGf%ychc*3;78&_$%k7hqPM#ldoFyyS2f!v(J&Jry`lJQ`d@M zbYy+g?)XdEayu_F2u|X{CumDS7#aJd@Q&qZarhDzo>wm7TraQ&ZGx#eyp6j!7QpX)@M}Wt(nm>ECs2yPsSSwlr;K7oSJ{90BF9=D(a+{ zz%oZ?RR~bYAucZ49=bU+pvMx_y$29lja;m|We41d0yf7l(|AFUijuJOOENZ?!PQo) zbpF|O>I9FVu;l6UN1qR2#YT)kVBOf%ICu~YO1wBH^*m_&&jkgMuD2X;<9f5B{^?%E zcAEjCjvOvefaI-Rebbh9t35E^foSjsnJ^RE*4U4KoR2h?(9=eY`gc(E*@*F8%Xg0Rm{(BMczoIj$ zeOSZY`s8VDyl;1x^VOaGbd}~#-FrNcw(!Lr5q?vBJ;;LdE0x@Y5K!`h_6q9b#Fds!VKrs+p(pOYv$&Az09eM+-QB+x7Z#W*8|aQo<@}``QtyXI+0? zNhhBSbQWnBcd5@%?NMksdJ5SB*e*movgw{{Bi+{5d>Are*>{8_AJf18_j#D%P z_}mWl5{sEBL$X$OZp3XNOaa(-u`sn)E2|yB}f+ zKLOdjc$*Dmd}fQKNx-oU98hsLK2Rae+EIM4`54>fZrq?x6s>cdbb^}bF_I78eL3|a z=v)zX82E8P-@A0O;46lK>jqG4rc360EIe@oEcK5c&!0mEs|BNh7kIQ9v(0XITxfli?I8|y&;?dj%k(I{nFy4j5irN=$%M4FYa zUx7s#DMKYCp%LMmklZUBfQ9`H99F~_d)Cbza$tbg7jPKfC10jK4RF_49Zip_fPCX@ z)8h1e_L8jr{j#T@DElj%@+zB1?0>l^h#;=2#+%}Z)pZ39*^QSYiIMDhfZ8|ptV-jt zP)N&Fs`Yiez~vBJ&}e&-hi*SQ$`&HfOK!u<2oG*}3`zpnR?)W#W%{Au-+IoYVxrB^ zAhlf;{U-_glu1%hC&&QwuV3P;7KoMjnUpqgN8E*HO2teB1r1t+kH+|+JXL8>X+ z+)ki0U5$Y25X#OKTIuDDL$8jA+!?*b=Vl3Z(wg9Bo5cw^7R`gub<_pMGVEdH z%W=$P?9S%9CzYFBQ^excWAdz@t^ZQ|?k`!=bdFH)#&hC^+IO8{u=JMFe4Ak|%0mXP zaSjU@C`8Zi+7F(>5UBGe(7zID5V&2G>ne4S33}M6@Jgy5z(h9xLnM) zFO@;2V%_V@EiXFlC`Sr_1TTDw!-ee$Byxr~zflb)?b+Y(LlmYB#k#vZKUM-^q>d2c#7Myh{q8{UbYmN($En{FQl4UyL>Nn#8W`LeH9PuI3t1%IzCS$=tT0I(hh)?*}3OPJ$xc5nTQUw*vV2j|?a) zX}$(67s`bYb9xKoQR=2Apf?@}Id-FSgS_Yr(GlgeipQSx@YYHPidFd7O)p9LPUXhS z^Sca>Gj6sFFXb&{=?z~1?3|)Ki9SrCer?u*NTM~rZ3B-lyaDI~HtUi77pij@zYY=X z@|Gx%P;KG45U$z!cj!Vic@?yu%THd3oHOnN^fD|Yok!;IET#a8b@A*)zD2_Pv$L6{ z6shrulaOyHm_e5{i4w~{Q$_g4N9uk2>^w1!pRVdLzEton4@eDwN&7wa{j)j-S4GJYY^A_*d@@$?LW|DLX-RvamUn_-fq4}LHkq6D#@@6d zR%VRDS8TF^JS!jdS^D^%7-nc!ic@eQL~;xuQEDInV`uIcljz7UPn>jX&gaHE%{;v{2v7WEYaE%%7PH|VLCKTKq=(~fQPXI@l5C|rv4(e@_u60qOF zT4(eKkb=;^EQDHT-x3uXDa0geH%4*_6r5dcsUL7gWZMGjTEEeH7qW~FvnJZ+w^5vE zOkJK&mfTgxDBTI);5KG@Ni_=;5jaDtXc39&uB7ENtq`8jKxpq@o!8R3{J+|7EqH)0 zFXVGir{18$fKgTQ!lCQjIdobaP9Q}^5B6i`9jZc2O^g4cxz%BOXKu6)d{p0v|Flc8 
zbRfV6Y;Ik12w~hi%%?AX^T4B1MM&P}0b%Z-z29FR*$wzGvQ`!yb&NGAjPANX`7jb5 z6-M8-tMh9#;f1t0idh__&{k(Lg5Jtp{AbYAw!!3g7FbM?#x)I!aane&QLqG{t((l) z`)P`MD>sInsx(suS}rY-z6PWXK^Si!M>eA%j@tn>|C>hvqZKwd3tf%dcV{+$DRj#K ztO{A1>HgA(_8+Ey#rx3Y+Pe6oxtV!yl{yUwsq@1MSp1(uH~=?!ZAK_TagJSMEg;-n z^V2e)S1XlFQXIgrtm9@0le&FO2St`piwyMvc>cKP0!=B!-N%{-mL9WHj7FzWpSvfB zLzKpqx{tfjzLd=9wWwv72*T^3BgEKsF@LCcoJwzXr?53zzOIqm#z$m3jRY3XnW&8s zEE)c;mt-E6-8i>FJ9}&Ei>IEX{Vj({oX;qgWl_?;>)FsJG}cjO(jeCj@``n_V`TMe z5dfBzhC*fO{K7GH3~03;E`M;dHd^^dKcr^_TxZSj&+!3$dhp@Y4zNlo#nL={ZKYkc zBD!<)9>0d1>1k{4+WJn2#n@V@#h3Q$4sZ*)%TYB;;J`YqFBjnhAzG3acI~nI9H_Xg zFPMDO8$B6AB6Q`jS(TNu;KBfeUeHLXD8bdTOfj>Vv^{{C&E#(6%LI8wsZ`713jxT(Gg(3dyFPYa#ezC!5E$|_=i_Z{ zKV|=j8U1FHZm~z0I?v(PnQw7g%2(3>iVBOx(#w^T)5r*}ED?Ez*b+au@xEj%zH+zW zM#Cdyn zPzI`ILM{a32z3~t^2D(R7Q#eR{z)y$ruq2S!uykOz3-gzugCv5i= z;`Ju@BK;I4Y{S~J2(Tn$JR%4qvs9iUft>zEw^^K4pTf|;?p$gI|5rg88)?scNYGqe zS}*Y~{$8GRkXO)lZi0ZTn&%t*Fe1Q;O4K{hcV5-2^K4yAqS#8> z?*BWH-6>Bs$WQ?L(Gr1Ix;k?=Ds&jci|6x{YBaWp2dx{!52hgmaY59@OFsLXVLR<5y{TPASR39CJXHS2m{y zciLEU)Pt{xoRb26^g5;p*VfIW6Pc7kj=8v3r~crU>Y{}Se&OI$Dx7uygEo@H3Vsi1 z-v_ct3JYDT&fF#uTJxS?W=uL-R*3+Aq$$^=k>Gv$Bx0^rIP+}a>{PxRmaLr9H!fT2 z=E^hb7}t$agDP?;(LK0!(5&7QP$jKS8vl=AwNxf!+kS$tOXigVn3y@%L9a*8$X{M? z_h=Qb9RTpGTwAm=!83MnGtNoauD86Wnj`$qJlcVn>%M#h zH0UVIDH-liw=d(BD@Byde03^BRk-nFynJiwX9MmdJ8{)H)#e1BgjltX}y_aB)r zq;8Jxh%)p1#<#-EYgOAObuTXP+F;^h2bXK7qvtHq0^%H1gM%hYLE-^mBeC+Po*lGZ z^3)!JF2_CoqG$|JOYv8&t@+{f#|n8PH#|VIU~`yAXX9mgCFHiStUXU_h@mwE{$_UA zJ)?cS{Ir#iz1!Oa*HH}wV2d%8dtfjnh^DsBMt7}@I-h0*J=?~V#+9OLWn3syktWU6 zb*@^P#21v!XfHs z{Pe1&H!7xfWcUFPfWT%Dg1gc2es@-0BROPqzZsy_Klj8^F>9gyW`EI3`YVHG^4H| zA9{I=jQ%hA6spq~Ddjfi+@$O+|FO57%+aFY9Ln8I7^Innk!X&zv1JYK67IMbMiu9Q zb8O7Kbf2TFM!B<;kBQN7?PptpOG8R>q#vclK+nLOk8BQSs6-J^5^Nc`@fllG(ov)& z@DJSPqjH=x^6^VkwSd)*$e=t-bKdbzPD3f!_5A!g-%4r`8Qe`P&o^I_?};*guwv*v zgLS`jfk3DJ9zryh0<5e#GE>s1&b7ALpSMYtNd6;Y*cga2m0b}_H{jfru`^MYQ{(Ls=W#V;NV~UWx+O+3C9WyS=&Suw)GYr}HFE6)NLC9! 
zfso|EGKl;`c6JW6Xw`9zKDojIp6Xeuj*8c*bQ<`rw$8gqu~LdQ9{e^~jx!xQAM^Te zgrQBtaF>c?RBG*fK|=I@yyf}8z7;(Fw*X95N#rXGoiF*%5CBq$zO@1?RNBT5?3l)Q z(n}_1XCto1ir3qD*z>dWVWpS7g|h@SzL+6VS)AJ>_t7q#TVS5Ec0G+xhm+opxg>K0 zTDw{>bIv|YEQ*XEWS(s8Pp!#9Uz&Xd#-966@rSK^N9*Al<2%8%h_@-Wc^5(GT7Q{F zD)^Xj;0590sOj)&5?i?kef+=I2x#h=NO*yhYf$a6Is)?2Ba9e zs!hdCH+Or<<^C}3WRKg2T>SNg>!)Z7FjB~-&OL*dgy%U~c;4pF&7##TNHc2fMOFFr znGs1kD>**&Ari-Gm?xH7tK??a;){m125!)vBbsM?skQOR`2pa3ta(PfXDsPlHNy?m zH<_U{*C7riLv*!v?@_wh5k|E5R-!a(?)JU_F{IP;Vtj&j*W_xL1;6qU!;Gb2*2|PM z)%(#7c@rMD*CxGUJp>_Pv&3r6=dspc9{qCiwsuj=1e;p=JsDXnAHmzm^lfxCa!%;k zz)VngM-d?#+`%)`3LP5+*@LcLeRVnP7{iV~epg&Ji(8xM31&2xxw`RdICz@MPnhHN zA(Q`~2~LdmYTRK?2PrJ)ySj-k_E9>=X6hY+#&Ie|H(SUW`sllR41VlhmJ&vB@4k$K zW`V=OOH;=tAM;*`w$FiE6-|_hQJ0);KEJ{#W{17#R7~r@rk2h5*Ks~%k##SCm9bgr zGBRI)A$C(p<4`CfXpEK2N2H<^KAwc-aq-vWRZjWBph)Jl4)Fd1CE_1rS1Y`AJj6R$ zBzzl0Q+j6CeT^Sc+XaSK6D}D3#NoJWOF~-KkYTA>H3xt`+qhZz#+`v_pl@vb6ssev z1FlsK#N5`KsFGSvyR)T?0Sx7e=ptQo-n%KB7zRLO!!HFQ0{g$22P{AFW(ykr?ovo% znT%~m?6%TP^L*By6W1$ls1qHhD{r5%mCGq8T|pW>Q;40)%pi_Y(*F=RvDjf;eHU*I z1+o1`EpuHGN5h9$bHBQmKI~NVOp_-SrLWplyvV@bSyThz=b6P@6wqU7xY_*Zc|+&- zJ>|sQ8C+S361Y-T!a{-wuRlKV7FMyZ&aXmwg{TQmz2(a^zMUH8YokZ+D9B#lt+6b_ zQ@i3F08(LsSmCm;WfE#t0Ax(310xW&8ZXA)(gIJX+#T{xHvySi_M3n2pe`ejzOfru z6t%sjZPJ4SQ+_AHQ5k`Tp4PyOQNq0Od*t5jyD!XmD8CqOR-_u?=@a^M>fDb2D1*>0 zkB=ktt0ID#P96C~=wd6%H(=M3F?8B5;Fl8Y5mm;V{Bi^ohc53_-1!)htj1L;t+*JP zw5!BA%TExvx}RNk|W&0nsk z)>Mv!8;hl0p^s1W7^v(`8MFM)ejxcVOqkikrN!m%X}7k|YEZL3G1KExdS+ji0Y3Pi^!=VHMqvq~Y1tyWE` z_Np?W6N~A5>|`VzMXalUlA{xY+|B)b5ow`Xzui#nSzZg?LbpMO!i^aL7OX6lOdRU# z8US@t2?_(eu-$e`<4QZ1`0#+wmM3ZRsF zd~A#*T$!qYsj4&|VuFze4ljq(!-8U)sDrGdDS7$I=l8H!A9sYKMaC!A|7Vsg#$DtZ z@qj;r$<^icPR7hzNn3r7(uHA2AigD^QlpLOZ!jo}hZLGqFNeG5{!`cuMtV#rcQS4? 
zb-TEQo(PM|+P$ZpG8Gh;OsYK`vgw?It}L51U$?r4p=&RgCEDV^+>F#wGJhvLOv)Rb z_TokK-<~!G8Ol?(IE_f0oAhcj z2^u{*?tWJR5zY}^-qBP}HZx|KWaHw(ktZjLK{mAVAI1AR?*k?Kt9G>_{WIw3X2fP( zK)Y45k)Gv!|6}%^Jgvn#&(}375e}oNP~|M#faAt6P%{A`o*g8LpafON)gu}xCi%fa ze|I3Q^k{rziXBz|OUMDS7Zu7is4+cRKk_~%_V5cP#9ZTr==?pdys3m>Y7x;e2of&c z5SC**DhZY__};omm3J}8!1BsSTa^M%9GpYPnuK3jChrRt#FJv{GALB zR~F;-n)5Vvb#lIF#*lnW8R>)QI?K0d&FdoSbNDx|So;@PG!8I4f{t?*g+n_74XLRQyn|VDd_Ms9+Ib*~x2l~1o~&tE-Z98Ie=93B zd2h7jPjY^_6g?GjprxrTP*gsxz;N$M~&DkvT^p3p2br{b;q2&SlwW(3%FMU`_wQf3iTusBd z(*-vYul^i$&XalPz_Bgq;lj!fg>**yuM&%vlno{D6==`HfQos#L=4+H>rEV`?4MLE z*x=W=ANTAMqQJy`{^2C~01VWrfIOkRn(@VBJgTV_xH?aYNM|}y=NaWhZibRk7v+v8 zHgr$9O%kuw)Al3DL!TC_rZG01hEPDgF16CpkxhApb#I7G*xpqN8z~u}2e0`&zz$5c zb0mq`Mvof+idGJn3-qq;ozSsb;m^n`>j=B86NOH@dqq`b5Eo*^SrG;i$J~$2MQIXR z6_+GE9D+(@2(!qkyUC*U)I+#Cqz_C`gID({=r{e=6u=5=DsS@hqDQ+>l>jDMiGx+riH(mgDq3ZlX+{yiEp& zu|5QDLhIACJq&FVYXFW@DgQg6lLI{;8)SKCr(8-v)6Y;KwK$GBKFqnz7TsBmTp~OH zSn648^oBj>V~%UP8!N%!plD-dIeOv7G#hrg{x68}{8zUfQ6r;jV~D6ki0&m!PdbQB z2^8Q9kNU}p;{Ns_ft;w{Q^AyLg-Y{M3SZ)&AJtz(T2;zwal1)@2!m6`jK1$oUqO(E zC<0f)FCS4s?K#wE@HYA|v>y>@kuDmyt5CWG531D(eoufz?Aqii)U=3ft;VXlC;-&cE$Ad)11$Z@XBKx-6q<9b}oeig}M@i8-`( z%AJd=g&GosY9Wnv0{#*K9?cGqF|PU;06mi#d>VWv9=cb5bx6&dGWVHYPhZ96EH%dt zeXnz4+Wd4dX{9Wj+%}}F-OBtx!dSZ?o=|5V5KDzaJ+lm;*ERwYYkVcnBjL0W>}Pd; zj)Yl1*rsk>1)c}g%Bv?qk}+5S86g!D4G_CA)5lhJ9S_?i$J1-Ft9SN9D^ z$f^xWI3G6NlZ}N&%ukv4BwI3~ZYaLRD)Vw(=zKQh*H9w;NG@(4nszQnuxUXeT3;WX zuIcqEA=suJ#}nQXFbF4Wa11}5*3^cuE6pDz7D|sZ<1qC98L0WDQ$FlR`p-SA$}3py z+=S#_^_4Dl(*r$8Tr1hgI5Zonf!+_DoRV^<-q0REuF^Ns*D##rzA{&= zV_%j=yC#tVWq^Zq_S{99-nPxk_|nfC+w)6UhLNN_rx_IQ)YT4;0Fvag)dEJiX)A2v zu9XN$7@&rAtHZvgA7728LpBsmgQZCx*5V2_+E)_jq=fuB)%q9%2CJMN?2#cwik3F45AuPsWhY*|cS^jU zDBtPY%#b7-{+|>L=C+r-8_*A?#_bQJv3CH;5Le)1nEdTdIV(G)!l5%J$=zF*zP?3P z8AK`qSb11=&0J4QR+TH4uy18O5oOhB=7+4UlIL%MkrgfE*}S>*xwPoQHUTJ~pKH1R~LHGZkpEP}Rw6NL>BIQ*l&8i#UaGn^WNdpyEyl;VvR|8%5q z$QC5!tK2X)MNEn{2wFRT)ewatPJ$@Awup4=^v5~wlrRK`vX**|V~R2^q{t1Iet^25 zi4)dxc4h7ET9db+Vv+M$ouJ+XaP# 
z`pRlOmPd{Vhe_J?huEa|&^)Vl{nK}%Jx$43ua1@^ERVWEX6m?FG2UI>#2i(Nl597)>2bojBC=#K3A*}-rm}< zeoWWHq}gyfZS2_Z7LSS8es6cJH&{ltFB&nkS|9sw{t)ZMvPxllOH`ir4pV5zienpL z)}|j?dlF%@d{^wmn8ne}Or=_hc#I5kFbgB{1kQl5j$Wj{IOuEfeBR%wn0)Af)vtE6 zx0OE~|2TzT&=2@OL>K==#{zBo3cIvDW(vo+NkVmDPc9qYBeq-;))-LF5Hs~bGD{vp zdN*IP%HlYPpP9#9HG=9H@3013pUD71GIooVB6@8ifsYn5k&5i5;0P<~@TCJi25l{L za8jqxIwEyvcA-qPvSkf+G?>$y-J^Xk%Id#9CFtW_R6%i#fJN4D_ubzxMORrQ+tTm9 z`N5-S{AGJ3IgeDNKBhq@+U;(E>cmw|83VYhXmK616M3J+U?OJ@etm@OL^B&rjN*g$ z3Izio0JFtJ7y?o#pZkl`A)vjbwg&OSJK+X{A5R{99+Ma?g@3q|K?ln&qU|`F%Q26W z6aQzeP)S{Cde|0t9Ax`O;pSyej8O}2IJ2U1^w^N~{RpqNl)Q5UKs>jZIrAs-9C3I`%*^>Cz)H(urYBCri@dBnDkTwaO+6EB4G|Z$%5a8rh`go zi(yG9TLVGoPW>&kvi~!$*~BOQt~GAl)g;jPc_)>@mRunPU z^c<@ynjVVVu(t%vxY(X?ALKpkvDG7s|*NZm)O ze0zk^9$QdYqnv$jr2C(%3B-B4C#`H|I4{ZN3^6{n0ocz?)#kOb&C5XvEBQN(rsoic}uhB}e17Sh@6#Uwb7Mz3|O zJQbn<^@)`Wy2nk|i8`7nbUJDnvM;{3SLLA=hO5?gNY2@$8`>tKa0i-L=6PsipvupY zF8^S==ktgE*+JOAQ}5_N^USg-i~`MIF0@xRNyP8ev8cG=iQ%@qi#z?yZu!q3{JdPzefl~Yd`dl+;p4{PR(>sVjuAbrPvf5&fRr zz4xIU)ihs%BJ~w4tRj2Bd}~tq+$$bX{gnI?I~{pNjp_L=2B@PI8gLy%^W1ayS*ln8 z4gsre=O?>kX##9xsuEy$UZ!kcg_21*{Rt<^DD-fS`8YV+NNCu zs1xUnJ)_&Du?5-Z-}j7!Ku(soiJ0uXNHan;tVpID!+GB=g-%TlH-LKoh**BmPx#DC zem&=J>D2^P3Yh8|7lrgP<>V(zYFbwfHRK?9!bJYAtxxZjnjTmTyNVnhdS{M^Z@8%B z&biHK!SIlePzFxHVXwaJNS4uTRx?N1Ce&tMltg8Pf!`xb0a`YoR z;9{kcc{P39Ca%Sp6{X{LT)gu0`~ZnnSMj|1&&A^nw)O%MAoV_d=7g_1_A^^2p)rDG z*(QBcxAZP0sK)2 znd4S+0N&=mDA;srfAj|9l$KP?-LZ+g4oT+NPPeq*HxRBCB{Qd{OVA5v5!zATZLa#t zn-!hS@+?>acau#I&U~Cc8M*{wa9zD)5-$8^!>#mUluI3FpOjLv(=W|>BRgKLig%{R z*zDQ!TUT9&9BjwtJ{b`(!~pQI!7N8N9MlH8KpW>R8dhV}W+`b5Rs_CsvT#n5_4t4x z?}CS>m8XQnrOs=|=`#F2JiMnnlD;4k+OMuz)7?t_=MSK2iYYNRG(okJBf@A#p+Uv> zL@en*Kn%yHFs{xFYixEWMnYo3lU-Ly4-La#JorZvfn1qg7fSYOtaZhe=ImkP?mDD3 zdBnx7HUN&0E?lE8reG{|#RM7GbZ7xNvI6I+TU&Lok(<&aoPM)Qb7CO1Ckb<1Bhp$2yIm*jF-Oy*XU8B zh=|PCNvDnJaVw7J#kh{ZDmp})E@%y<)w=@^1?xErB8m5IgKHT!Q)?ybLN^6r=dNMC^TePH4+)vg4R&fl`m zu+e02be~NhMU`V6Qu|&Q8fs;~Rx35X9C_vwm=|^iXCVXS{B(S@auljbDcc~u%WBm5 
zw?-;CX-J(sAJ*03_=L9&ela#fLUk0ETFbkllPo)lD+p(Q0wqMYI?TV%f>d?eraq3i zVF)b=t5yNb%xKmO`T8#PbUzclr!ttrQ`;&SCO87AuT6d$UKgccaRjR>QBVCYro}pu zMN`)3Q0j?kf3*sTTj}~TU+k3T($Mig2&v@+h`shiVK(mLT}L1CTRu%%Z>BH0dBBDj z!_<%GJsl^7Xs9YBN}wtau`av()fwDTfsGTf=KP#R+S11T5|DyO{?BC7qVWsaW&4ko?HsbwbpdR0 zw@6BixIQSF!%Gf`7F_{G8x^t6LUFScQq{f_A{`mF6dsMgH|y%g`fW~*xp}J2h5f9Z zWJMmj`CvB+02QC8^vNN!2){qqQPKJVWZ)*HvoCh|7>RDSt0IW~#PWi2VF2znJ?G07 zdHmJ7oabs|o9Zb#cG`Y-M?1lhg!{?1bbwlu&Wm_v-zQ0X*2Q&yT&RX3oCD+(H@RtS zNS$d+woTJ=FV~DgDN^NSPDaM=1Drbf>7__=gca}v!QY6_2bt374x43YE&*FIW+iO# zt#Yz;e#z`N^{x?CSpe8yvqSEDhZOzW!9;(zP0HFqmdHAMg*fS z*m!gZ&TIfS(dR)i5h}6vAq2Ol*|_^I%vr$E+5=yRw7doCTgD|&CGYNEa0U4uST|r2 z&(v@Yq4v_$o~h7g+MbtbG6Kx}1wI8^Ci^_`C;vHJ1LhP02>Z#1I=raG0^wzB`1?#W zC5$>v2HPaQ_`Y%RBdrFr+}`}QOO%E8dyB~SxGc3|ND%Yz>*z*N* ztF}t}H771B8fvIDKZ4(P*Z8@`lv!7SFqyx|U0|Cz0e_hS41=|3X99EQAdItSa$9x9 z1hg<$r=;>QY?<&c_(d?imG~X+;k7Lo)Ln2X0xp__G#akT_?W_Nq$k;jHP^}WFM_wo z22Sg4P%m8RL=|Wr?GElrrADcy{LR_gbj`Q(AN|?N@UoI+S&>}{;s?XbkVWyvKt|X{ z@|Q7>7?#cFBEm@B`(l;K>nEO=fh;H?1aRz+t$4V*W(33$@Sa(E!DkKdO9GYMbMearay4^GtrL}%1psH{PF_Vx z&ntDM%*JZBvs8;e3%XAtE<$@a1rPL}N#85bT9^@m(3~R!3H9|v^Tc~ngV6LeuU&ug zB5Qug+*EJb*X5Y)fg~|`4Iyk9;W?rR6al${ONd%DRn)S0Y-VZf z^ZKVy_Yw2Sfo}gOo^&k6m>7e6+v}Q-*)^Rk4!Whv1!6JdN09PBd0vp`x0nOO8|sPC zy}tg1k|_G?IV$s8cdV20OZ!K@FPsC>AT%dHp*b3|PVqD6V74qm)et?zj!^RRmmd%q z{};%K_C?PVxnDnr7G^5kHq3!U z@!T4dF@2;p0GngLM_~#$9j|fY?B03h8oba9<~Q!?+|@PGdXk%yOtuY-LJPqZ>U*S% zo^#j&%uwFvL3Dxf@DQcZ=?kMK(zSZzcA|HuV7X(0F558U`MvZ`j=RM@&Lh5_|AUa=f)x+-@Mu;@Vzgfs4)rNnK!~7Vh$Ke zTWVlYD0AK^iI(s~*V)o{ZIcYNJY&S~C{<10xy4y#QZYXyY@{Qj^1$#q(eTCpF|-Y5xwABnDF*DqgHjVo9brdVw`d-jl9^{#b!9@=<>4~+DR z`Cyiz7QaLva954_H9u}TGtR=a$%DL%#Lp8h%fD%n^$-#_j6F@0O8U#;bYnUxM#CfW z?nZOTe1h_O{2}>1V-|O{t)O|2r7|h%fJHHFko%AtOsL3!t5<%xaI~Hga4q&?k?+psGX|XkG2W0(R#?Jv&Zhqjh zQ;RHe7Et7=`kTMzJ*WkUwWU2K-rF8965s9_pG7Vxo7qAQt*a-<@c8JT>lm2ClSGBD z$$V(l>Rj_HxG|IkG8SdfKw~l`L>6a7Jk~Y)^xjUL71z{J7gM;j4}%Fq6}Z?5O{>_O z?_g<&fkH3lHXwBWJBU>&l3*7kZCAPWV 
zOBuD6%wdDOkjC;tY!=Xve|83b^SMqo+7q#%!ekD3RQq6c4OgJEJ&WF~J%^=NRKOp3;!522G0UGOVze@_s@mi>B>UZ|zJrvA14b4R+t`Gamf`B!dD zU99Rwa$CU-I@iqyb{c&1V^)jHla$HrW>i2)ItSxTv-YxW|Z(@&AliyJsMyq<`t9) z`s{U?y_~>(qMi7^`BLWx^2*!?yS}u-y(nBxO9oAy$_MpB=`k6|5KA%RI*N*E& z1@2Q5P59Gmr>%jlyRYom2?Ay0WdF9dRCQT%s8W9|oOy1KOan!i*aXu>13Dy5?VKzj z<#8k+mU90kAqAGX*k1b|NI)x&GZ4c&mbC}ln5N} zkXe!1Q5Y|!5{5Rtz$c73w^u#EUn9kc%i55Az4(Ma*PWX2#q~`ia|E2;gvW!nVjuug zS%{777ZT+X)Jx*B{KRq}%DUl`YW?=90o%lkMF`R~1Pfhrt>VaNdCl#E%m;s;b7IyO z3Hn$Ju^(Xxqy%#VHNA2pPfZ~>jh7}2PhlnvRYqn8>f&(;A;`i7W}HPf;m1NpQ2r36uy9Z6;{O zL3u6G@s@uw-)$zN^KYAx_645b9h2NB$8c8Y0CAwxfdRogJ!7P7t0@905#$c1q#S>r zqBEd#6voB7655g+n-Yo%RXt;iebMAbE%6??UR*s;G9LbjtrC0OQ(FYuc+HD;%dPdQ z$d-{&18+I>;{vIH!vAH)bhORu7Mtqho>nOVdi;(f?f?S$0NL&%Hgg%S%}Xz2+V<~! z!@>xFUtve_%f7Wx^bA`VY1LqEwz~3<557e(?&O)LDbT) zm``K0z+xx4By1|_jhMz1@P|+3RIQBPi{bd4_c5)qcd>#k4*%GD8$(%{^M+?mwY`mU19AJ~069CX{#R|_@>IaCmV0$;&FNc3&rinuG9U06l~qyBuk7_|c6~UBVSJ1m=(H##4`R**F(8m+&+V$+u^RMN?l)@3WO|Zk>|zZM~k5{@2t?2-mR^ zw&V&h|2_G?(qz~UUC*MYBER_>xuwpaVU3*{rg;;)*uk@ns?f6T5KYsi$7f2wD&;pV z`^OQ1ks1%8G47Dn&E;`hry(%q?Op8f`+e;pd{#fxkuhwNKaPxAz#R&HZpaZzN4yJl zNs2Cg5|5n}Q<4jN6Q|QB@E{n9kVFwQ?3FEz5`vw3aJ3a)o2d;frRxo}q-+JuiQmUb zjlFYK#%o+$WkxE#a5{PG3Y1}})Ob&_!3lS;dr~zCy$Hl@lPMdzw5%Cbtx03(bbbm= zNbqOg1b841N(e1XUi>LQkyaZCJ-&e!x)d}Kq16|q$cKoDV192dRhSzoDT<>?| zM97YCRG3c;FgFYP6TiHK37U#x(n3Q^pVcgWyR5xkVV8ch7isWLt132;m#@}(ms#lh z7?vUB@n)_;O=jx=xizqwkuIr$nujRQ9SQE01`lOaj78LK7%m+u#>U z7i@s**}nU^iHMu`*4or~dJ@qi^r)W__R2;TfqB(Zf|N_6xFU6J#iojOsAC52-Ad>) zk|sc;WRqDTf$X*C@0bu-W-K(hu9v5#sHe7Z=5$Waic`E=h1s?I1cQrUtAxg1Y~2za zWllS@P`HWSa$&Z@)uu)fku+u=@n|1Rc>sXY(Y?>(5g+>yK}*V)Ux5(8QKmwPr}!vo zt~ix_4P+?0!liqm_=wT{nSe2)YYNR2eMF+J+iXAB`NyrG;TjXR@8;eD<#g*qZCyMc z&vA{&y94j|AxXP(?&@^@>e!ZNBdH~GT5ao1;J*@~DF{60jQ9u%E>|jN5 z)2}cwJce^UKX3z5Zi%!~g z(m$Q1s+ToR4Wgk>@MUGx@#hFyH@wt;sDzd*O95ugmb#f$`f5M zT`#RVVTxCyF~(-XK>YMxPDld0Qr&CE^AfmqP6|w$!SGpI2128Kvk1e&i+Nymb+~VH z@S~yU?*Wnf#q=VV>q?IDF6XIg(JwA+!Vg32M2zNb;nxivg1=Rx({coAoz2<6=!qDO 
z4&(I81&q&(LQo97JA({$uxDMCr?^(STT5TBryq`thLu3X3SzH^g@qHs(C5CFn&@g+ zxaz%oY^!3Kw-@WNx=WFvB<}VXfVz{Q%$uWKr%~NPRHSbK9$l+ykY!?OEanQxwzhTq zW`*v%bd(r=#kS_s#;{-f$HLAeXFN_5+FP(fzSh? zQUdjH-Yigu6EoY@RaXfQ+f-||DjvKLHLALr^f0CHuT34p;kBux+$c$>Az^m>iWTxA z^ke>{O}tPO zeQ&NU7uz=$OoQ9CR0$7vg?wn`!oKS?Rb#^#NuN+?wgeFbWlXxG9*aqFzvWO;3#V2` zOjQzs>1bGCD`f5=V^!BA+nHR))esFDaiJuRksZA;yM`_nHF;zZmJG1-lCgX){MCo{ zcjN*w)#~}fT%LY>F|iVC>I!Pof?pq0GYz>%?!adsi})x?ci;0kLtgFGm4@)|_zi+V zn5A{;(gehUXxdC>e)di@F;1ERfw>#DFdE5U-ofD1n1UqVlHPl;?XouMs=|9PzIC34 z9Hk6c6qN7&MBFY9pjxlzihS??nGaM?7%5r7=!m|;$l%y~QA7n&c}oveqUWWfUZ4G= zC23*MFf}S+9ul+|w&f7E2V@PUVP+X0E9gl9w49wDz19Asy=Dy$4mE$5$vdpGt=5ij zd2&NL>@`7&7lAiQ6*Q~3#Bk74?h?lI-N21L7s`O%#%dU)XgS(c(2+!~87)b<1$65r zRR2A*mUa6L+yW(&80M-i9CId+V`MYlWPJIf$CtQeIFAmZG`94*)ln z#PgRwX9saP zivj7ZU(QlMOwbUk^|4(^V* z;w9^2&!OSClJeu?$6|0qP|>N?8}@-Ut)_IbcwO-F0{x{UuJ*fLL6QdF`A3|yJ&h%L z=#RQm8_-!Fn)U9rE7Y%@<50K+z}nd133ePFwe~adem>Qg4;-vt*|@DF#z<*w`zx^N zRJ}XW#{ApHlTJbX;Ix)XJhdUix5kjpjz$r;zTCO*5v?9nv&@@EIzDV)$ z5sJ+P(~;U0l0qUJFQ&p*;jh@-9&V!tewoAN$QG7u-xbP_O4&HXgfAu&7czhOq@h#y_l~Hll8azT-@kq_UtuR&;(Al|?J=v3!2M6Axl-~blTH<% z5b>u6x9Ex8%Y<~)PW0ApVd_5fe)Frelp1N$WkT4{;-)PJ#-yYV4S8rbo0#j(AZjzR z#?^Z=?Im%Q9+z#xxR3tCW0pfea|HIf$O5AHM3SZD_93eIgIt3gVK2@rzyVz$X^=m6 zIot}-ZTYW`q3u7=cb?05$7uGi1+%&rT%XwBh)(dJui7%T=T#vtRlJEHncmY=qTVl8 z9-TCHHm;JMe^e z4N;@~F)S|HvBNRT`b=kr; zv(i`M!dwMRJRtgK=SlUA#7GY&ntI>XUH8urNN|3B*S+aZt7K zqg+`F!|savM+Hi2TinTal%SL|KZ%T*NJecY6&<8Z@`CIc<_pn`f&eYCh(rD?au^+8 zoOy^Fev&Z~6}Wq?1Wdq-1p3w+6>t#oy0q zE6*Rz$}|GhMxZE*zLlEA;CRE2R$Rr!jVhEoYKbq_g#*1h1|BJ;IsZWF)ric`@H}?Q zev(FNm;_!hiNN%eFyV}Z9~&P340qwmo{lqjzi{+V&)e$Z$u;V2TVNuC5GV^O-ymp& z)A>}H1t&cvlODOIl0?6>DR)zTTOr~cTvwg^gbb| z`ehytgf0{Xx6nF2!a+iW96IGZiW z&ot56dt7THtzfgeQ}BX@?;k*gk#ozbTNEZ$qhI+&rgPOt{626*9CG}gcxmU-pxrtU zfP0!JCNEsq1VzItgvU56qYgp@i+r~~!UaXnIDdtyWYGqqYlwl2b-37>?rkIe-pRN> zDLCMM_k*m6^hcsT&8OwAL{3k)2M(iYXv_fT9dX(Q1~wt)LD(IPz+2+))n-o={_b>9 z1t&eT>Hh0G)SS2!iR-)8y_Rp4^LT1BwLa^HTy1L&J9A3(QbFtot40JWY~Y8e9x1Tn 
zet1*nJUi5B=4^}Sn1HP-_}~!2e=BI@Z z(2ZaSVmwWJN()eS%5(D16*smZME-TG{)jkBQ<4 zlC_(10GxcV^}$%zRvcZ<_uqxdS}O~lnL_1S8PZoofLvz1vW@FvajeyieZ{{G`m0?gP(?O}xB z&yoq)aV0}lJfYgPs^X44cUqp0tvO7ac~%zvQ}umODF;+s8kCcN>@#YK4$5reG5rqA1U}oO+d8X3Rs{mNSTBKrQ%yEixO2CH*zyW3EZr z*KX6IgvbzWR|@EG@(`(hH5jGF&9khEmgr0RvKluf+lg{98)BQY2m%92O>MNp0rH zW|Oq#DEhU-7HseeKzUfw{}p{95NR->^QY(TKK2W*9_mU{EV~B>&GId4H#;mX=lvqC|1c;2KmY z8lhpN)6(-;BMG{EsaBrPA!Za{jH^Oo55k(gwCA|Dl~zH6&gmAh0a_F178!|wg)b3e zENjjjQGz>m=|d&V+PbEr=6N05a3v7R{g$}jbo?(s%F$Vm4175-^Wu*gVz6Cfj6vHT z9`vl+$kb!Xbb{7fsjwxQzd;zGq>;(Q7O%_vUq2mCL>CPM2V!w=PC!W!sz}0sTh`u{ z8wTzf=8eDj`{oSKJU`^S&21KB$`B+RDz@87e1ycW(|_vFP2gSP4Wocxo}7=(&vk(M zKYe$YBO+a7)!bZMv0cA};6Q&IJ_T$1OAwSY)@OAdC3CL@yVwV(`3qniHytLcC#ds+ z2i9cR_mCAmG&hE~@X*c;vk`;p5hsGVqzR-Lb7z)#k>J+}@w3VHFxO4Kxn%WD2&d94 zJSj)ce&-P^y7Ign1tS=HzkqS7iCf5ksN(C^lnnq{`>xw{l+-laNa6I;SR(9w@ll)6 zsI%zCHUM$QPifO2?Okon+Ssn1I8?$1_xRxGTRAAFqkVA_YdA~$2k`R!N0J16+lv9e zguP<+9}uBzk4^8|uS47rvO)MccuZIjS+8f$9l+>S#oW!p+P!$(X1nCyVY~ZWor?Pj zN5Ym$(l(+jv7soLFsd9nH&=$k6X6Y!h?OO2hbT)s=G5VEJrS++BU;{A%%7j7!1MEJ zcE)SyakB$GU@!LWcC?h$?^*`fWZ*Bj^jW;YeskC43xq!lT+7X~6mg|n{;FW*0@ctjoCYkW{Oazm>*^&o!H1ASdF&7zid`|jx%Y~-9h@PDRMZes zQvqwdKeD8|2!i84bqm02c7MnA0-xD7^hMlL^`am?`qo5>MPt^DQ40#-=e0y(@_>d& z*ltXv!%bud&~=C)}t_K_Gq$e`75m56GMPTj6tfX2rsB35q3GEYU?oEn%@r zb06?)j{Jbt|7wth3kSI?)U~FdT#xdBGP1*%|&&R#lqs7X-i4API{y`^jjJU7BDZ0l3*~EpPzo0 zo}k3Z(=+yAJ%fGrAEWQ3q{g6Kw4fU5znl{pQ%RP3D6;NhKIR$$1qSvqZVqsU}L ztHxA}?#Tbw??Fn1g=@KiP|H+mHUR+|=l7%bz22gmJKoiVTjk&Ti5LK1W)kuI&E{4IJ=vXwZl^G(zS)(~`0vb+JoqLBEPRTObRsQUpiOO$qW{Fj(4MzZZE$xi% z=X{bK)20C&q7=H1$+Z0318&)em^oM@w{H!QsHi|fKZ(9fTa>Zz6`A`~KWQ(|wq2q3j;257Hf;v%egOmj0$SesC4~Kv;sex8^YbK-p3l%n5dh z2*z-)>p!k77TX6B|AH7uzUD#cZndgwUvyf|I{Ebnl5f>geK;&E`m5@5>Ah4pFc;|z}&IAp7+YVo^(?Le^ELVE-Ko`U?KAaq*gZoeU_wMKh@uK4YhKT z879#ixGBlqF+;+|c8HC8Am64&d9i(trbrDzIAnTbk}M}nt1O@GehyB*_DrK59P=NP z3}xhPtAZRLTm@`5p`3Tu=Xb=rRMtIoO#X>OVGXkb_rN3RsCP>wT@)b8xPxWz9c=|A zv{({*=aHu9;%I9^?FRN+2*Qs|W_L&W02LX*W|SH~ox;-u<~;i`%7UIr=bR?^1C}y- 
zB-WIAaF=d@tjBRn#eW9Y9HgM3Qy}Q?q+xOPiFAWYVRGMh*VudMbs?lIJ{PB?v_XOY zBh?JTcXavHByKnahIj;^P+H9@SCi>D^a$y7JqVEKQf}6(T%oHm<;i*2l~0^BwzfXh z{s(fqHkK7y1s7-&T~%MIa~qiV>ciss{SL6sCC>+k`6{Lfrx`_mrr*egmz2ToPuG~C zVI2AT_N+pt8;-E~VqEVbpu(pPy`>cmh_dXZC0Ey2j4r|VAZ`QbsagpyFKWg(+!)$O z=OvL9k`vU}@>g#Fj(Q@iP5X%h7vvIU2AAiA011MFJ}Ni@q3XJwsY`(Sqf!UAiuL%f zoC8$3DuoK@3bNjgCNw7jZ*tW0v688S?G-;yR#BpNKEYDgey9{@)L8O$8~d;}jB5c+XeKrkdvl)bP#vD!OWRFR`1lYQhppWuW+`P+92Gt$ECP+4Bu%SKRE8Zn zZ85aBI>8tG<+A#Wuf{Q}CiGCIx2X`j0GZ(L#*hecxgF48kjLRn62_?`DImWr6qc0t zQNAY__J&$>h6OaX*r%;5MH*2kkQj5H8##$>4?rP)huDRy4SJsT7MEoSW*S&t3 z>-2n(yU&&aHjK3*riGzsKCZhnE~*e zkKCP=>2h^?g7z>G?-*|Y*3E%eXlstn!+&>_#}wSvbzH&Ex(~Bcdb-p|4VyQST3Z9A#ggPGG?e@DK68a zJqXeMx6zqlFPjPNZT<(AV`i3Z#D!=1^mTn05l%VE>fru7jyAOGQ%zf|>PM7mnlo_} z6e_~En!~GRZzyk1K0G1TP~ykT1{%1IKt!MJV5#{bHZwDHuj6286GD`v*$B?8k{5cZVHYmGWRRe|q zmMe@C!fTy@?vzD$&V}10$%0kck@_6Ss0tioI-aZs-W`lIy-@ERO-A|wvozHQ1(PCC zb2Ra+V|H+YsY*mC^+b_xgtrelC`#(vy?MUt#Ew1a`ViYDg ziRj2Pz4QAKFF%^>&-16C2xR;iL-|SwM(8f+(`__elg<$87-TNKH?rc46NZoUXEOl- z+_WJ`{?tKViEq3i|9#G5rU=$}?#o-wdi`yf&i(14i~}!!@6bPhT5|p+XcKm-TR|(N zMZ1INN5z9<%rNgWc@$9l@H4w!#1Gh3YdB57){|^&j30f)A3_xNPs8vfOnbb3M8|g| zhd=NM-Y!VidM+mEbyp`E*PNtad#86LkUAaW-*&@z5E-Ak90ZEq{2ReWu=_VhHZq>R zhZ}snk67gxB=L}LNrUK5utUEtVk6(h##0W|4;uijRHoqPEloM`_3_iPnW;$)$s$iq z`Nhy&__|%+dVzEq=O5FBpi`%D0%XaDdZlzO>G*0&IC^a+E=eoqgxG765J^tFsyoe- z)-Hq~Nvh}*a7rRphOYe!ioY&~V)5wBQQq1@^|R+KJ=Bd(hUBi|Q=LUvp5z=r<{s^G z-@{BP6F^b0X{zE(R7DC-CB4s?>yJl4G0+UPYmS@!Otn1`8m!SW)jb-HM40py=tGE} zdbTFMYCDt89bk|zmRvZf51{6$^Q~CGUJ_YvxN1p&Mhk%+*AY|W>YQDqFt>68oz=dZ z$uPkv*$4YBYK&1pCej5x4`V7iyL1(su%6M*qVzRRmNjQJasIQN0i*(q?P0ye&c!z9 ziOk(WH#HwIsWgg15yYk6HQv0XP;~vCLz7DTaW3|VGOW)0l*&3cv)$ZOQK^%7)^P<5 zB{?KtKztT?_4@rb!cWy{ZWC@5ob3i7#|{~@Vnov{VPkm@)!>uFMptA5At`K#UhT>h z+|mAC0bv)O=oCG0ncISYlrc>@Sok9sgs0g^i6AIftpZokXBZNiN%<*dWkJkpHH(Po z)4;UdEc)EgqF7BmJA82w+q?_KHg4k9D&%cMA&DAYIx=ugxqIlYIHp^s2xFqD01!kB zhZ&CbLW$#mnAcm&5FGU_N^`Hmx7l#Z>m`ys*j(aNOHsZNO>T_&+J$<#_+MM3Ypw=b8x 
zZSGe5HWrKWJRX$lh1lF8c8cdn^}{af>W-xH(D;kfvd_<@I#JZX&90LpWftOq%DZ-Zst+R&d6#fb& z?~6Ig9RO?LQ%JPyuJ~+C$WFr_RqgQB2H$@Mv5a5&?HT^RE#t~_{J>4{nkd?bcUQf_ zmnuGQQ&79P@lXKDO>>ATGmM-XN=tYj^q$p1e?HsjoACZUzU`c;Do&(KyWG{$`RgyV zJguSfDVGl9*iUnU#=yP3>Xg3#4~ANVqs>iVwSWL%WTNFe&yQ@2Y^N2w77&t?DJ5km zV895u)Q7q$p0t!R<4cjJn`br(_UhHjnO{-LZ%Mas9ZlKgZxBRP*svKg|0ZcS5d}iQ z2|weWvBkY2I^Mw&#?bCh&CE4JzOqcM9Xe*;r>hdqz*@^ap&Pgq5@fIEwzLO~pa2py77Ljv6=PW28kYHc@Fn^6ln~bK2uwLpKcn^7of9DXnp6 z0y~al)G!#FOA4(fIEgb5-WiAgs$*t*JoDwQk?||s__Jig-oy>h9E4jG)++XS3Etx2 zc;r#~)b_m;K(N=qBYr_kjjSM`LJM*P9!<3}G4D1ls~m-~Q*Xka@hkZ-A(PBg2&+s+ zdzH39$9!=hPjVu2Q3Bn2F<3>1f`1q*gU1JrCqWSrhSbv3CKM!SG^!o7JHy?AE@0g2 zjK-HxmgyL1wK73u_NJl*pkKU4s8H-w7u2H&yN`iJ-jf_Yk< zGEL}=O?4^svadkmF3opxiRg0Dt^mHU^Ml{?=8`fsqa{_wvv`$?J2RKIhcsFFmHi%! zwm6&*rbc^(LMACNY8lA&%B!D41ykKtwR)br_a!r@#>~ITyYU?GZoxx-bgC%xJkci) z-T*Oea`8I=ya~a<1x(?0!1WUkzEZFjLooE9^6FDKC@iGtDVZwGz7Bz?;d@TKGB2&= z$Q-GyhdZ$-($%!n+NVxWG&Nn&Xw#lX3f4 ziC*a7z)6Oj>xgzVhWMAA?YYlS#jr;aZP3L<|*#)pwgo zG8JW~YYn0F>7A7?G7c$RyatYGZjX{p-irh5KhSD>k}0aS4+85>d|}>*mZ0Kd20{r6 z?|X5?t}n@l2xPJcZ!d6L_II35W$>nlI3Wv%B63i$2=>)a{|?c25fxc6Tm+S-SVR+6Nq_y(g6>}Dp8D>o*oLi~9+B`j zc(Qosqg2cCPbQ~SE^335q4If0Pp{|@m2x&9^&T6aV!xZq@*C-(nS&0*n-tbpuZ5O^ zhna)u;V-gUjk_Re7C)D&b+K?I@_s+dErP2HRd5BP9bPhDywHK`El1D2m zsoG3IPrYKrV@EiCRy7e#_zlW!J@)MUHb=Wk4Kr*Jku&m9sPh`$`Z4y7AXLK$tv~;u z%_ayZy)RSmRy;z9l~yok#Acji7L;%dPoh-RE}KRX%B!pZ)5UE>VOHJg!?1500scLC^mAtgPu!A%&p{t8xk;NdtrkH1thIV zL3*03%WEko&A`(sV02JxHLIj!m|q5Sj*zmdFo&>B_N>AmyhWjt!98!nI1A4iK$BTQ zBtJqP>-Q`?7-o`9D7{by5}KNL&Ep;V(-l>*CI`*p-rqcU$DD$4FX-u>b`h0A!abb| z)Qi^5B4>NomJ@9Q2EqY806~!94o)x5K#&ceE=g>jt>?i>qNoS2@4S`4HLh|+OC5Xm zPa_P!UWlQk_&P+wlv|T&BvLLVlfUdQgl#-seW$1nLBWC8R2c&U7p#iw%-k-^KoYtp%ImpHL_JU8z!8i z)jLlYz%88*Cyf{Ka1TfBb4;aK-~1gL7g+ zi&c6MSV2{=e@EMy|Aa*vwP&$IoE$hpxC1pveym2SL7|Aw$e$>q@7Voj-ZeFd0Cb)o!vfj( zJ@7MF((2_NY?;*!ws`J}2(8a(1hvtCNFbd+cm6{nu?Yhe6 zS>|cAV3W2?7}Y%8Xq3zDLD*kc0?Ql@vOX~8mQ#m?e8R7yM|Wb66qfvF$o`y2s^{Cx zQ|?>mK6!(!hM(oeij4dAAxJz2-vxG2!UoGK&09O`GEWfc+<$tS{ek8e 
z*U2S{eYe5gZ;L(a9iicYjBhMxodj0YLIPa!)dz?)UwO!6jnI)wBad(5C5qS~-O+@{ zcu!}0w_$&tLqo<>a@a^{s+G`|>0ll7ui-!;WH|}<*EpI`IRd*T3oQ)+qMz^x~cLlaZ{~;PaIf$3|-Ri$Y`9$F2=Cg@K_OEBb*xLZO3JX8cPWT{Wg-(%gMTtz}pg1m`^*W=+WI0rH~K9%I+qQdGB^ zj0S4j8v19T{aA&P9T7wKf#iwrx_8`yVQmHCv}A+iBh&txKZf<<8uSh>;>|0i=QSh6 z3e!Ub-c;Gk~{BinO*9C zYAoFah61GuKNjKw-@WjU;|U~533Bf)=p>%dF=ZxY&=GfrCx>k*@~HFZF*(oq@SIw=E3oY+U=&}ofj(gx&aa3_JYK3>HUcX}E!}kLLnG6IoNIr`*OEQsc{S3uV!q|nWbzoL=yYmJK$Y7hs4z#|*su%vovO6`DjzBfXiAb)*`w!p7c>O_2~+ zb8~s-4potF<;vQ!iH0i4-w?iDaT*ZW+J$?B7oILuW|}IGS6tn@I{IqQ7Kf3;=IX?3 zE`H_Ce3z$+oX#RRr-|_4jE*vBRh$A1r4f>UqpO;b`s^tRp72xp_dMie2kafIG7ohV zEiPk+z#P@`F|Xf_2C0c07v?fTFUtFenj=?PK%^36LrA~Kqw`Q}oXk%J7PG-KmJ@P1 zGxMi$_CVmWI|TQoj?Fl@0Cx?_GP$xzmWmH48hK-@fJV%K&wqv-UG7F~#nG{_2;LiH zRN`(rHkrs$)Hz%mXV231bP{vMRFIh%v!YUUe+`}>>E0yX(~)*KkySo1f5e1dQa<%A znwqvWjV6h%4kA-3(#GiA@l39Azvw@(R5Ri#tkQl3`{}7!G~;+DOxuM zOs?Y-(NUZ0K7$%lnP8Tr6l`ElD~V<6qm0rELnOK;fgH>W2Z=BpunD0d7h@t`s-%CRas4{6MmQ1MpS0H!k2MP>QJM+bjvJ0`DL-O_J__vm z#bXE9H?zVHwe)l*rlH9;W|6D}r$8o7Ai=sPWYD*Fhmxt=1;AJu;9YssDy0%=AUxNt z1NrqOpjuK|c^0ca^dPIk-sivZUyakJDz7zyJsB&fi@Su2;{^m(O0-cND>#~3;^~I; z2B&=N5MJ>MoEFn`a^lB_is%?bZsr6v|JcNgaQc3atGi@q4NBqFc?6D~K2Q9$qzaUCv(+zpZx0SSmZsB_vtE%^f0KY;VXP%Gr6G7lBwcnH}+pVk<1M{clNA( z@(5>X9FNTM*Q#~YoDue1Lr2{SF&BwQrwj%g>1YgK7K2K#+*yOHXac8Do5RTPSXmBD zU1h!MR>Z${NYoJdl<^8nF4fC`3JFh3YN$ zgbA-@{4(Lzw`N7QA|Ph2WHX(Hucv?d8=lBDYx^`-Yi7ES&en?I?#DO`(O>1fIHwj>mmtNNe|&%2pDQJD(6{*?*OQzj#$l!;&^qvhGQNW_+q z6eznwRTnif?`7K8I7qTKV}*V^Z=?VOHrMTb^Asoz{fWgV`r0HOpS}hrKL|XvTfi^10#GWZH_>Tn(J|i zP`dbh(FmFX8hVd9q>oW@JxSEg*Mdkc9?ylCuM*BuJD8a%eo?FzsVP;%%LsN-59B?9 z<;T2-fWcUzpEOwG1|xyOk*Qu+cDwXsL#T|v?C_NAi-5Sft9*RABiit;k+vvoHiMTK z8_r0Gt7^k;LJwXZ#0md-CF~g45Tr|Zw9|qYqPOvyq7{3Ui|wDnf}3PoH<%Fdt-X$8 z?g9N*$uCL>)xtRUv=3LVaA?1(I?>3*2XK^!Nfsl#5wk#W213pWCx7VwB`w9bPBAN&xW^zrHiR#t0i|0q3$vz6dmD%m|sP6bB;8G3sM zc6;d5_|P84E@!i4RyzhYSLF36xEH<@sR(SRn?KcAXSk{- zKiW=3m}mK6VHxG%#F!GEuTkxxNmz^WLesGKtYp7AG_-jhgt_u<3-pgJ0AFrtDLnL` 
zf5SYTh)Drb0LDfa$LX*tsk9Q{P$&k&Ulee(nFW3DqhQB~b3mj>aPs1x6kcQw0#$mq zlW=J~DF_Sth5641wYr3-aXzs=j!K`I=Tg}<1-qzM0CI&6?F1-@5HqN-{au{D+rV}2 zkB!(;o^7}zyxgBB+*9tA)HqSc0_j%t;a-Ao<#RxRKjx2$Gz81SF|93>B*-+5r~U4& zu1~Q5LhOp{w*Xb$$~=fN!TO_y=EHX1WNdcT6Gy-!A@!axbgj3}6%7CZ)QAO?&lFXT R5Ky-1-y8w}0000ASz6smv9ACC literal 0 HcmV?d00001 diff --git a/ansible/01_old/roles/dsk_bot.datasaker/handlers/main.yml b/ansible/01_old/roles/dsk_bot.datasaker/handlers/main.yml new file mode 100644 index 0000000..7e3220d --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/handlers/main.yml @@ -0,0 +1,34 @@ +--- +- name: Reload systemd configuration + ansible.builtin.systemd: + daemon_reload: True + +- name: Restart dsk-trace-agent service + ansible.builtin.systemd: + name: dsk-trace-agent + enabled: true + state: restarted + +- name: Restart dsk-node-agent service + ansible.builtin.systemd: + name: dsk-node-agent + enabled: true + state: restarted + +- name: Restart dsk-log-agent service + ansible.builtin.systemd: + name: dsk-log-agent + enabled: true + state: restarted + +- name: Restart dsk-postgres-agent service + ansible.builtin.systemd: + name: dsk-postgres-agent + enabled: true + state: restarted + +- name: Restart dsk-plan-postgres-agent service + ansible.builtin.systemd: + name: dsk-plan-postgres-agent + enabled: true + state: restarted \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/meta/.galaxy_install_info b/ansible/01_old/roles/dsk_bot.datasaker/meta/.galaxy_install_info new file mode 100644 index 0000000..ef68bf0 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Thu Aug 17 05:34:10 2023 +version: 1.0.4 diff --git a/ansible/01_old/roles/dsk_bot.datasaker/meta/main.yml b/ansible/01_old/roles/dsk_bot.datasaker/meta/main.yml new file mode 100644 index 0000000..2844256 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/meta/main.yml @@ -0,0 +1,28 @@ +--- +galaxy_info: + role_name: 
datasaker + namespace: datasaker + author: 'datasaker' + description: Install Datasaker agent + license: Apache2 + min_ansible_version: 2.6 + github_branch: main + platforms: + - name: Ubuntu + versions: + - focal + - name: Debian + versions: + - jessie + - name: EL + versions: + - '7' + - '8' + - '9' + - name: Amazon Linux + versions: + - '2' + - '2023' + galaxy_tags: + - monitoring +dependencies: [] diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/check-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/check-agent.yml new file mode 100644 index 0000000..63d277d --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/check-agent.yml @@ -0,0 +1,49 @@ +--- +- name: "Check dsk-node-agent" + include_tasks: dsk-node-agent.yml + when: '"dsk-node-agent" in datasaker_agents' + +- name: "Check dsk-trace-agent" + include_tasks: dsk-trace-agent.yml + when: '"dsk-trace-agent" in datasaker_agents' + +- name: "Check dsk-log-agent" + include_tasks: dsk-log-agent.yml + when: + - '"dsk-log-agent" in datasaker_agents' + +- name: "Check dsk-postgres-agent" + include_tasks: dsk-postgres-agent.yml + when: '"dsk-postgres-agent" in datasaker_agents' + +- name: "Check dsk-plan-postgres-agent" + include_tasks: dsk-plan-postgres-agent.yml + when: '"dsk-plan-postgres-agent" in datasaker_agents' + +- name: "Check dsk-docker-node-agent" + include_tasks: dsk-docker-node-agent.yml + when: '"dsk-docker-node-agent" in datasaker_docker_agents' + +- name: "Check dsk-docker-elasticsearch-agent" + include_tasks: dsk-docker-elasticsearch-agent.yml + when: '"dsk-docker-elasticsearch-agent" in datasaker_docker_agents' + +- name: "Check dsk-docker-log-agent" + include_tasks: dsk-docker-log-agent.yml + when: '"dsk-docker-log-agent" in datasaker_docker_agents' + +- name: "Check dsk-docker-mongo-agent" + include_tasks: dsk-docker-mongo-agent.yml + when: '"dsk-docker-mongo-agent" in datasaker_docker_agents' + +- name: "Check dsk-docker-mysql-agent" + include_tasks: 
dsk-docker-mysql-agent.yml + when: '"dsk-docker-mysql-agent" in datasaker_docker_agents' + +- name: "Check dsk-docker-postgres-agent" + include_tasks: dsk-docker-postgres-agent.yml + when: '"dsk-docker-postgres-agent" in datasaker_docker_agents' + +- name: "Check dsk-docker-trace-agent" + include_tasks: dsk-docker-trace-agent.yml + when: '"dsk-docker-trace-agent" in datasaker_docker_agents' \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-common.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-common.yml new file mode 100644 index 0000000..773a8ee --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-common.yml @@ -0,0 +1,53 @@ +--- +- name: Make Datasaker Host Agent Directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "{{ datasaker_host_path }}" + when: + - ansible_facts.os_family in ["Debian", "RedHat"] + - datasaker_agents + +- name: "create Host Agent global-config" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/global-config.yml" + state: touch + when: + - ansible_facts.os_family in ["Debian", "RedHat"] + - datasaker_agents + +- name: "Setting Host Agent global-config" + template: + src: global-config.yml.j2 + dest: "{{ datasaker_host_path }}/global-config.yml" + when: + - ansible_facts.os_family in ["Debian", "RedHat"] + - datasaker_agents + +- name: "Make Datasaker Docker Agent Directory" + ansible.builtin.file: + path: "{{ item }}" + state: directory + recurse: yes + with_items: + - "{{ datasaker_docker_config_path }}" + when: + - datasaker_docker_agents + +- name: "Create Docker Agent global-config" + ansible.builtin.file: + path: "{{ datasaker_docker_global_config }}" + state: touch + when: + - datasaker_docker_agents + +- name: "Setting Docker Agent global-config" + template: + src: global-config.yml.j2 + dest: "{{ datasaker_docker_global_config }}" + when: + - datasaker_docker_agents diff --git 
a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-debian-pkg.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-debian-pkg.yml new file mode 100644 index 0000000..a70a13c --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-debian-pkg.yml @@ -0,0 +1,102 @@ +--- +# - set_fact: +# datasaker_agents: "{{ datasaker_agents | reject('equalto', 'dsk-log-agent') | list }}" +# when: ansible_facts.distribution_version != "18.04" +- name: Set os-specific variables + set_fact: + os_specific: "{% if ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '7' %}datasaker-redhat-7 + {% elif ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8' %}datasaker-redhat-8 + {% elif ansible_facts['os_family'] == 'Amazon' and ansible_facts['distribution_major_version'] == '2' %}datasaker-amazonlinux-2 + {% elif ansible_facts['os_family'] == 'Debian'%}debian-repos + {% else %}unknown{% endif %}" + +- name: "Create temporary directory for key manipulation" + tempfile: + state: directory + suffix: keys + register: tempdir + +- name: "download keyring then add key to keyring" + get_url: + url: "{{ dsk_public_gpg_key }}" + dest: "{{ tempdir.path }}/datasaker.gpg.key" + force: yes + +- name: "Create {{ apt_usr_share_keyring }} if it doesn't exist" + file: + path: "{{ apt_usr_share_keyring }}" + state: touch + mode: "0644" + changed_when: False + +- name: "Ensure downloaded file for binary keyring" + shell: "cat {{ tempdir.path }}/datasaker.gpg.key | sudo gpg --import --batch --no-default-keyring --keyring {{ apt_usr_share_keyring }}" + +- name: "copy keyring to trusted keyring" + copy: + src: "{{ apt_usr_share_keyring }}" + dest: "{{ apt_trusted_d_keyring }}" + mode: "0644" + remote_src: yes + +- name: "Remove temporary directory for key manipulation" + file: + path: "{{ tempdir.path }}" + state: absent + +- name: "Add datasaker repository" + apt_repository: + repo: "deb [signed-by={{ 
apt_usr_share_keyring }}] https://nexus.exem-oss.org/repository/debian-repos/ ubuntu main" + state: present + filename: datasaker + +- name: "Check datasaker Agent" + include_tasks: check-agent.yml + +- name: "Install metric sidecar" + apt: + name: "dsk-metric-sidecar" + state: present + update_cache: yes + force: yes + +- name: "Make Datasaker Log Directory" + ansible.builtin.file: + path: "{{ datasaker_host_log_path }}/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "{{ datasaker_agents }}" + +- name: "Install datasaker agent" + apt: + name: "{{ item }}" + state: latest + update_cache: yes + with_items: + - "{{ datasaker_agents }}" + ignore_errors: true + +- name: "Install datasaker log agent" + apt: + name: "td-agent" + state: latest + update_cache: yes + when: + - '"dsk-log-agent" in datasaker_agents' + - log_agent_image_tag != "latest" + +- name: "Reload systemd configuration" + ansible.builtin.systemd: + daemon_reload: True + +- name: "Restart dsk-agent service" + ansible.builtin.systemd: + name: "{{ item }}" + enabled: true + state: restarted + with_items: + - "{{ datasaker_agents }}" + ignore_errors: true \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-log-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-log-agent.yml new file mode 100644 index 0000000..e80af9b --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-log-agent.yml @@ -0,0 +1,53 @@ +--- +- name: "Create datasaker local directory" + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{datasaker_docker_user}}" + group: "{{datasaker_docker_group}}" + mode: '0755' + with_items: + - "{{ datasaker_docker_path }}/agent/dsk-log-agent" + +- name: "create agent config" + ansible.builtin.file: + path: "{{ datasaker_docker_path }}/dsk-docker-log-agent-config.yml" + state: touch + +- name: "Setting dsk-log-agent config" + template: + src: 
docker-log-agent-config.yml.j2 + dest: "{{ datasaker_docker_path }}/dsk-docker-log-agent-config.yml" + +- name: "Run Datasaker Log Agent" + docker_container: + name: dsk-docker-log-agent + image: "datasaker/dsk-log-agent:{{ log_agent_image_tag }}" + state: started + restart_policy: "{{ log_agent_restart_policy }}" + ports: + - "{{ log_agent_port }}:{{ log_agent_port }}" + env: + DKS_LOG_LEVEL: "{{ log_agent_log_level }}" + command: "-global.config=/etc/datasaker/global-config.yml -agent.config=/etc/datasaker/dsk-log-agent/agent-config.yml -mount.volume=true" + volumes: "{{ combined_volumes }}" + vars: + base_volumes: + - "{{ datasaker_docker_path }}:/var/datasaker/" + - "{{ datasaker_docker_global_config }}:/etc/datasaker/global-config.yml:ro" + - "{{ datasaker_docker_path }}/dsk-docker-log-agent-config.yml:/etc/datasaker/dsk-log-agent/agent-config.yml:ro" + custom_volumes: "{{ custom_log_volume | default([docker_default_path]) | map('regex_replace', '^(.*)$', '\\1:\\1:ro') | list }}" + combined_volumes: "{{ base_volumes + custom_volumes }}" + +- name: "Run APP with fluentd logging" + docker_container: + name: "{{ app_name }}" + image: "{{ app_image }}" + state: started + log_options: + log-driver: fluentd + log-opt: + fluentd-address: "{{ log_agent_port }}:{{ log_agent_port }}" + when: + - app_name is defined + - app_image is defined diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-node-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-node-agent.yml new file mode 100644 index 0000000..ba277bf --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-node-agent.yml @@ -0,0 +1,68 @@ +--- +- name: "Create datasaker group" + ansible.builtin.group: + name: "{{datasaker_docker_group}}" + gid: "{{datasaker_docker_user_gid}}" + state: present + +- name: "Create datasaker user" + block: + - ansible.builtin.user: + name: "{{datasaker_docker_user}}" + uid: "{{datasaker_docker_user_uid}}" + group: 
"{{datasaker_docker_group}}" + shell: /usr/sbin/nologin + system: yes + state: present + rescue: + - ansible.builtin.shell: "sudo useradd -r -u {{datasaker_docker_user_uid}} -g {{datasaker_docker_group}} -s /usr/sbin/nologin {{datasaker_docker_user}}" + ignore_errors: true + +- name: "Create datasaker local directory" + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{datasaker_docker_user}}" + group: "{{datasaker_docker_group}}" + mode: '0755' + with_items: + - "{{ datasaker_docker_path }}/agent/dsk-container-agent" + - "{{ datasaker_docker_path }}/agent/dsk-node-agent" + - "{{ datasaker_docker_path }}/log" + +- name: "Run Datasaker Container Agent" + ansible.builtin.docker_container: + name: dsk-docker-container-agent + image: "datasaker/dsk-container-agent:{{ container_agent_image_tag }}" + state: started + restart_policy: "{{ container_agent_restart_policy }}" + privileged: yes + env: + DSK_CLUSTER_ID: "{{ VAR_CLUSTER_ID }}" + GOMAXPROCS: '1' + DSK_LOG_LEVEL: "{{ container_agent_log_level }}" + volumes: + - "{{ datasaker_docker_path }}:/var/datasaker/" + - "/:/rootfs/:ro" + - "/var/run/:/var/run/:ro" + - "/sys/:/sys/:ro" + - "/dev/disk/:/dev/disk/:ro" + - "{{ datasaker_docker_global_config }}:/etc/datasaker/global-config.yml:ro" + +- name: "Run Datasaker Node Agent" + ansible.builtin.docker_container: + name: dsk-docker-node-agent + image: "datasaker/dsk-node-agent:{{ node_agent_image_tag }}" + state: started + restart_policy: "{{ node_agent_restart_policy }}" + privileged: yes + network_mode: host + pid_mode: host + env: + DSK_CLUSTER_ID: "{{ VAR_CLUSTER_ID }}" + DSK_LOG_LEVEL: "{{ node_agent_log_level }}" + volumes: + - "{{ datasaker_docker_path }}:/var/datasaker/" + - "/proc/:/host/proc/:ro" + - "/sys/:/host/sys/:ro" + - "{{ datasaker_docker_global_config }}:/etc/datasaker/global-config.yml:ro" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-postgres-agent.yml 
b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-postgres-agent.yml new file mode 100644 index 0000000..9ae1589 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-postgres-agent.yml @@ -0,0 +1,62 @@ +--- +- name: "Create datasaker local directory" + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{datasaker_docker_user}}" + group: "{{datasaker_docker_group}}" + mode: '0755' + with_items: + - "{{ datasaker_docker_path }}/agent/dsk-postgres-agent" + - "{{ datasaker_docker_path }}/agent/dsk-plan-postgres-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_docker_path }}/dsk-docker-postgres-config.yml" + state: touch + +- name: "Setting dsk-postgres-agent config" + template: + src: docker-postgres-agent-config.yml.j2 + dest: "{{ datasaker_docker_path }}/dsk-docker-postgres-config.yml" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_docker_path }}/dsk-docker-plan-postgres-config.yml" + state: touch + +- name: "Setting plan-dsk-postgres-agent config" + template: + src: docker-plan-postgres-agent-config.yml.j2 + dest: "{{ datasaker_docker_path }}/dsk-docker-plan-postgres-config.yml" + +- name: "Run dsk-postgres-agent container" + docker_container: + name: dsk-docker-postgres-agent + image: "datasaker/dsk-postgres-agent:{{ postgres_agent_image_tag }}" + state: started + restart_policy: "{{ postgres_agent_restart_policy }}" + detach: true + env: + DKS_LOG_LEVEL: "info" + DATA_SOURCE_USER: "{{ postgres_user_name | default('') }}" + DATA_SOURCE_PASS: "{{ postgres_user_password | default('') }}" + DATA_SOURCE_URI: "{{ postgres_database_address | default('') }}:{{ postgres_database_port | default('') }}?sslmode=disable" + volumes: + - "{{ datasaker_docker_path }}:/var/datasaker/" + - "{{ datasaker_docker_global_config }}:/etc/datasaker/global-config.yml:ro" + - "{{ datasaker_docker_path 
}}/dsk-docker-postgres-config.yml:/etc/datasaker/dsk-postgres-agent/agent-config.yml:ro" + +- name: "Run dsk-plan-postgres-agent container" + docker_container: + name: dsk-docker-plan-postgres-agent + image: "datasaker/dsk-plan-postgres-agent:{{ plan_postgres_agent_image_tag }}" + state: started + restart_policy: "{{ plan_postgres_agent_restart_policy }}" + detach: true + env: + DKS_LOG_LEVEL: "info" + volumes: + - "{{ datasaker_docker_path }}:/var/datasaker/" + - "{{ datasaker_docker_global_config }}:/etc/datasaker/global-config.yml:ro" + - "{{ datasaker_docker_path }}/dsk-docker-plan-postgres-config.yml:/etc/datasaker/dsk-plan-postgres-agent/agent-config.yml:ro" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-trace-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-trace-agent.yml new file mode 100644 index 0000000..76fa4a3 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-docker-trace-agent.yml @@ -0,0 +1,37 @@ +--- +- name: "Create datasaker local directory" + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{datasaker_docker_user}}" + group: "{{datasaker_docker_group}}" + mode: '0755' + with_items: + - "{{ datasaker_docker_path }}/agent/dsk-trace-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_docker_path }}/dsk-docker-trace-agent-config.yml" + state: touch + +- name: "Setting dsk-trace-agent config" + template: + src: trace-agent-config.yml.j2 + dest: "{{ datasaker_docker_path }}/dsk-docker-trace-agent-config.yml" + +- name: "Run Datasaker trace Agent" + ansible.builtin.docker_container: + name: dsk-docker-trace-agent + image: "datasaker/dsk-trace-agent:{{ trace_agent_image_tag }}" + state: started + restart_policy: "{{ trace_agent_restart_policy }}" + detach: yes + published_ports: + - 4317:4317/tcp + - 4318:4318/tcp + env: + DKS_LOG_LEVEL: "{{ trace_agent_log_level }}" + volumes: + - "{{ datasaker_docker_path 
}}:/var/datasaker/" + - "{{ datasaker_docker_global_config }}:/etc/datasaker/global-config.yml:ro" + - "{{ datasaker_docker_path }}/dsk-docker-trace-agent-config.yml:/etc/datasaker/dsk-trace-agent/agent-config.yml:ro" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-log-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-log-agent.yml new file mode 100644 index 0000000..74527a0 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-log-agent.yml @@ -0,0 +1,65 @@ +--- +- name: "Make agent Directory" + ansible.builtin.file: + path: "/etc/datasaker/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-log-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/dsk-log-agent/agent-config.yml" + state: touch + +- name: "Setting dsk-log-agent config" + template: + src: log-agent-config.yml.j2 + dest: "{{ datasaker_host_path }}/dsk-log-agent/agent-config.yml" + +- name: "Install fluent-bit" + block: + - name: "Install fluent-bit" + shell: curl https://raw.githubusercontent.com/fluent/fluent-bit/master/install.sh | sh + rescue: + - name: "Copy RPM files" + copy: + src: "{{ item }}" + dest: "/tmp/" + with_items: + - "{{ role_path }}/files/libpq-13.5-1.el8.x86_64.rpm" + - "{{ role_path }}/files/libpq-devel-13.5-1.el8.x86_64.rpm" + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] == '8' + - name: "Install libpq-13.5-1" + command: rpm -ivh /tmp/{{ item }} + with_items: + - libpq-13.5-1.el8.x86_64.rpm + - libpq-devel-13.5-1.el8.x86_64.rpm + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] == '8' + - template: + src: fluent-bit-repo.yml.j2 + dest: "/etc/yum.repos.d/fluent-bit.repo" + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] == '7' + - yum: + name: "{{ item }}" + state: present + 
update_cache: yes + with_items: + - fluent-bit + - file: + path: /tmp/{{ item }} + state: absent + with_items: + - libpq-13.5-1.el8.x86_64.rpm + - libpq-devel-13.5-1.el8.x86_64.rpm + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] == '8' \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-node-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-node-agent.yml new file mode 100644 index 0000000..3cd4c56 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-node-agent.yml @@ -0,0 +1,20 @@ +--- +- name: "Make agent Directory" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-node-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/dsk-node-agent/agent-config.yml" + state: touch + +- name: "Setting dsk-node-agent config" + template: + src: node-agent-config.yml.j2 + dest: "{{ datasaker_host_path }}/dsk-node-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-plan-postgres-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-plan-postgres-agent.yml new file mode 100644 index 0000000..de2e08f --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-plan-postgres-agent.yml @@ -0,0 +1,20 @@ +--- +- name: "Make agent Directory" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-plan-postgres-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/dsk-plan-postgres-agent/agent-config.yml" + state: touch + +- name: "Setting dsk-plan-postgres-agent config" + template: + src: plan-postgres-agent-config.yml.j2 + dest: "{{ datasaker_host_path }}/dsk-plan-postgres-agent/agent-config.yml" \ No 
newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-postgres-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-postgres-agent.yml new file mode 100644 index 0000000..e187ded --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-postgres-agent.yml @@ -0,0 +1,20 @@ +--- +- name: "Make agent Directory" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-postgres-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/dsk-postgres-agent/agent-config.yml" + state: touch + +- name: "Setting dsk-postgres-agent config" + template: + src: postgres-agent-config.yml.j2 + dest: "{{ datasaker_host_path }}/dsk-postgres-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-redhat-pkg.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-redhat-pkg.yml new file mode 100644 index 0000000..08d2a64 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-redhat-pkg.yml @@ -0,0 +1,61 @@ +--- +- name: Set os-specific variables + set_fact: + os_specific: "{% if ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '7' %}datasaker-redhat-7 + {% elif ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8' %}datasaker-redhat-8 + {% elif ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '9' %}datasaker-redhat-9 + {% elif ansible_facts['os_family'] == 'Amazon' and ansible_facts['distribution_major_version'] == '2' %}datasaker-amazonlinux-2 + {% else %}unknown{% endif %}" + +- name: "Add datasaker repository" + yum_repository: + name: datasaker + description: datasaker-repo + baseurl: "{{ datasaker_yum_repo }}{{ os_specific }}" + enabled: "{{ datasaker_yum_enabled }}" + gpgcheck: "{{ datasaker_yum_gpgcheck 
}}" + +# - set_fact: +# datasaker_agents: "{{ datasaker_agents | reject('equalto', 'dsk-log-agent') | list }}" + +- name: "Check datasaker Agent" + include_tasks: check-agent.yml + +- name: Make Datasaker Log Directory + ansible.builtin.file: + path: "{{ datasaker_host_log_path }}/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "{{ datasaker_agents }}" + +- name: "Install datasaker log agent" + apt: + name: "td-agent" + state: latest + update_cache: yes + when: + - '"dsk-log-agent" in datasaker_agents' + - log_agent_image_tag != "latest" + +- name: "Install datasaker agent" + yum: + name: "{{ item }}" + state: present + update_cache: yes + with_items: + - "{{ datasaker_agents }}" + notify: + - Reload systemd configuration + - Restart {{ item }} service + ignore_errors: true + +- name: "Setting dsk-log-agent config" + template: + src: log-agent-config.yml.j2 + dest: "{{ datasaker_host_path }}/dsk-log-agent/agent-config.yml" + when: + - '"dsk-log-agent" in datasaker_agents' + - log_agent_image_tag == "latest" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-trace-agent.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-trace-agent.yml new file mode 100644 index 0000000..af6b8ac --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/dsk-trace-agent.yml @@ -0,0 +1,20 @@ +--- +- name: "Make agent Directory" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/{{ item }}" + state: directory + recurse: yes + owner: root + group: root + with_items: + - "dsk-trace-agent" + +- name: "Create agent config" + ansible.builtin.file: + path: "{{ datasaker_host_path }}/dsk-trace-agent/agent-config.yml" + state: touch + +- name: "Setting dsk-trace-agent config" + template: + src: trace-agent-config.yml.j2 + dest: "{{ datasaker_host_path }}/dsk-trace-agent/agent-config.yml" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/gather-facts.yml 
b/ansible/01_old/roles/dsk_bot.datasaker/tasks/gather-facts.yml new file mode 100644 index 0000000..8e413e4 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/gather-facts.yml @@ -0,0 +1,3 @@ +--- +- name: Gather Ansible Facts + ansible.builtin.setup: \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/main.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/main.yml new file mode 100644 index 0000000..59d6ae3 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/main.yml @@ -0,0 +1,47 @@ +--- +- name: "Include Gather Ansible Facts task on Ansible >= 2.10" + include_tasks: gather-facts.yml + when: + - ansible_version.major >= 2 + - ansible_version.minor >= 10 + +- name: "Check Datasaker Agent List" + set_fact: + datasaker_agents: [] + when: datasaker_docker_agents | select('search', 'docker') | list | count > 0 + +- name: "Include Datasaker Add Directory Config" + include_tasks: dsk-common.yml + when: + - uninstall == False + +- name: "Include Datasaker Debian Host Agent Install" + include_tasks: dsk-debian-pkg.yml + when: + - uninstall == False + - ansible_facts.os_family == "Debian" + - datasaker_agents + +- name: "Include Datasaker RedHat Host Agent Install" + include_tasks: dsk-redhat-pkg.yml + when: + - uninstall == False + - ansible_facts.os_family == "RedHat" + - datasaker_agents + +- name: "Include Datasaker Agent Install" + include_tasks: check-agent.yml + when: + - uninstall == False + - datasaker_docker_agents + +- name: "Include Docker Agent Path Permissions Setting" + include_tasks: permissions.yml + when: + - uninstall == False + - datasaker_docker_agents + +- name: "Uninstall Datasaker Agent" + include_tasks: remove-datasaker.yml + when: + - uninstall == True \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/permissions.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/permissions.yml new file mode 100644 index 0000000..1b912a4 --- /dev/null +++ 
b/ansible/01_old/roles/dsk_bot.datasaker/tasks/permissions.yml @@ -0,0 +1,6 @@ +- name: "Change owner and group of datasaker docker directory" + ansible.builtin.file: + path: "{{ datasaker_docker_path }}" + owner: "{{ datasaker_docker_user }}" + group: "{{ datasaker_docker_group }}" + recurse: yes \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/tasks/remove-datasaker.yml b/ansible/01_old/roles/dsk_bot.datasaker/tasks/remove-datasaker.yml new file mode 100644 index 0000000..7d8d4d7 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/tasks/remove-datasaker.yml @@ -0,0 +1,135 @@ +--- +- name: Stop agent service + ansible.builtin.systemd: + name: "{{ item }}" + enabled: false + state: stopped + with_items: + - "{{ datasaker_agents }}" + when: + - datasaker_agents + ignore_errors: true + +- name: "Uninstall datasaker agent" + apt: + name: "{{ item }}" + state: absent + update_cache: yes + with_items: + - "{{ datasaker_agents }}" + when: + - ansible_facts.os_family == "Debian" + - datasaker_agents + notify: + - Reload systemd configuration + ignore_errors: true + +- name: "Uninstall datasaker agent" + yum: + name: "{{ item }}" + state: absent + with_items: + - "{{ datasaker_agents }}" + when: + - ansible_facts.os_family == "RedHat" + - datasaker_agents + notify: + - Reload systemd configuration + ignore_errors: true + +- name: Remove Datasaker Log Directory + ansible.builtin.file: + path: "{{ datasaker_host_log_path }}/{{ item }}" + state: absent + with_items: + - "{{ datasaker_agents }}" + when: + - datasaker_agents + +- name: Remove Datasaker Agent Directory + ansible.builtin.file: + path: "{{ datasaker_host_path }}/{{ item }}" + state: absent + with_items: + - "{{ datasaker_agents }}" + when: + - datasaker_agents + +- name: Remove Datasaker Host Directory + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - "{{ datasaker_host_path }}" + - "{{ datasaker_host_log_path }}" + - "{{ apt_sources_list }}" + - 
"{{ apt_trusted_d_keyring }}" + - "{{ apt_usr_share_keyring }}" + when: + - datasaker_clean == True + - datasaker_agents + +- name: Stop the Docker container + docker_container: + name: "{{ item }}" + state: absent + with_items: + - "{{ datasaker_docker_agents }}" + when: + - datasaker_docker_agents + ignore_errors: true + +- name: Stop the Docker container-agent container + docker_container: + name: "dsk-docker-container-agent" + state: absent + when: + - '"dsk-docker-node-agent" in datasaker_docker_agents' + ignore_errors: true + +- name: Stop the Docker plan-postgres-agent container + docker_container: + name: "dsk-docker-plan-postgres-agent" + state: absent + when: + - '"dsk-docker-postgres-agent" in datasaker_docker_agents' + ignore_errors: true + +- name: Remove Datasaker Docker Agent Directory + ansible.builtin.file: + path: "{{ datasaker_docker_path }}/{{ item }}" + state: absent + with_items: + - "{{ datasaker_docker_agents }}" + when: + - datasaker_docker_agents + +- name: "Remove datasaker_docker_user" + ansible.builtin.user: + name: "{{datasaker_docker_user}}" + state: absent + when: + - datasaker_clean == True + - datasaker_docker_agents + ignore_errors: yes + +- name: "Remove datasaker_docker_group" + ansible.builtin.group: + name: "{{datasaker_docker_group}}" + state: absent + when: + - datasaker_clean == True + - datasaker_docker_agents + ignore_errors: yes + +- name: "Remove datasaker_docker_directory" + ansible.builtin.file: + name: "{{ item }}" + state: absent + with_items: + - "{{ datasaker_docker_config_path }}" + - "{{ datasaker_docker_path }}" + when: + - datasaker_clean == True + - datasaker_docker_agents + ignore_errors: yes \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-log-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-log-agent-config.yml.j2 new file mode 100644 index 0000000..d502480 --- /dev/null +++ 
b/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-log-agent-config.yml.j2 @@ -0,0 +1,51 @@ +agent: + metadata: + agent_name: {{ metadata.agent_name | default('dsk-log-agent') }} + cluster_id: {{ metadata.cluster_id | default('unknown') }} + logs: +{% for log in logs | default([{'service': 'default', 'collect': {'type': 'file', 'category': 'etc', 'file': {'paths': ['/var/log/*.log']}}}]) %} + - service: {{ log.service | default('default') }} + tag: +{% for tag in log.tag | default([]) %} + - {{ tag }} +{% endfor %} + keyword: +{% for keyword in log.keyword | default([]) %} + - {{ keyword }} +{% endfor %} + multiline: + format: {{ log.multiline.format | default('') }} + pattern: +{% for pattern in log.multiline.pattern | default([]) %} + - {{ pattern }} +{% endfor %} + masking: +{% for mask in log.masking | default([]) %} + - pattern: {{ mask.pattern }} + replace: {{ mask.replace }} +{% endfor %} + collect: +{% if log.collect.type == "file" %} + type: {{ log.collect.type | default('file') }} + category: {{ log.collect.category | default('etc') }} + address: {{ log.collect.address | default('') }} + file: + paths: +{% for path in log.collect.file.paths %} + - {{ path }} +{% endfor %} + exclude_paths: +{% for exclude_path in log.collect.file.exclude_paths | default([]) %} + - {{ exclude_path }} +{% endfor %} +{% endif %} +{% if log.collect.type == "driver" %} + type: {{ log.collect.type | default('driver') }} + category: {{ log.collect.category | default('etc') }} + driver: + containers: +{% for container in (log.collect.docker_driver.container if 'docker_driver' in log.collect else ['*']) %} + - "{{ container }}" +{% endfor %} +{% endif %} +{% endfor %} \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-plan-postgres-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-plan-postgres-agent-config.yml.j2 new file mode 100644 index 0000000..9311679 --- /dev/null +++ 
b/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-plan-postgres-agent-config.yml.j2 @@ -0,0 +1,13 @@ +agent: + metadata: + agent_name: "{{ plan_postgres_agent_name | default('dsk-plan-postgres-agent') }}" + data_source_name: + user: "{{ plan_postgres_user_name | default('') }}" + password: "{{ plan_postgres_user_password | default('') }}" + address: "{{ plan_postgres_database_address | default('') }}" + port: "{{ plan_postgres_database_port | default('') }}" + DBName: "{{ plan_postgres_database_name | default('') }}" + explain: + scrape_interval: {{ plan_postgres_scrape_interval | default('30s') }} + scrape_timeout: {{ plan_postgres_scrape_timeout | default('5s') }} + slow_query_standard: {{ plan_postgres_slow_query_standard | default('5s') }} \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-postgres-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-postgres-agent-config.yml.j2 new file mode 100644 index 0000000..7e4e166 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/docker-postgres-agent-config.yml.j2 @@ -0,0 +1,20 @@ +agent: + metadata: + agent_name: "{{ docker_postgres_agent_name | default('dsk-docker-postgres-agent') }}" + option: + exporter_config: + command: "/etc/datasaker/target-exporter" + port: 9187 + scrape_configs: + - job_name: dsk-postgres-agent + metrics_path: /metrics/short + url: localhost:9187 + filtering_configs: + rule: drop + - job_name: dsk-postgres-agent-long + scrape_interval: 60s + scrape_timeout: 10s + metrics_path: /metrics/long + url: localhost:9187 + filtering_configs: + rule: drop diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/fluent-bit-repo.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/fluent-bit-repo.yml.j2 new file mode 100644 index 0000000..0d0945d --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/fluent-bit-repo.yml.j2 @@ -0,0 +1,7 @@ +[fluent-bit] +name = Fluent Bit +baseurl = 
https://packages.fluentbit.io/centos/7/$basearch/ +gpgcheck=1 +gpgkey=https://packages.fluentbit.io/fluentbit.key +repo_gpgcheck=1 +enabled=1 \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/global-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/global-config.yml.j2 new file mode 100644 index 0000000..646b5b9 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/global-config.yml.j2 @@ -0,0 +1,22 @@ +global: + api_key: {{ datasaker_api_key }} + gates: + trace_datagate: + url: {{ datagate_trace_url }}:{{ datagate_trace_port }} + remote_timeout: {{ datagate_trace_timeout }} + manifest_datagate: + url: {{ datagate_manifest_url }}:{{ datagate_manifest_port }} + remote_timeout: {{ datagate_manifest_timeout }} + metric_datagate: + url: {{ datagate_metric_url }}:{{ datagate_metric_port }} + remote_timeout: {{ datagate_metric_timeout }} + plan_datagate: + url: {{ datagate_plan_url }}:{{ datagate_plan_port }} + remote_timeout: {{ datagate_plan_timeout }} + loggate: + url: {{ datagate_loggate_url }}:{{ datagate_loggate_port }} + remote_timeout: {{ datagate_loggate_timeout }} + agent_manager: + url: {{ datasaker_api_url }} + base_url: /dsk-agentmanager-api/agent + send_interval: {{ datasaker_api_send_interval }} \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2 new file mode 100644 index 0000000..a0e8269 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2 @@ -0,0 +1,42 @@ +agent: + metadata: + agent_name: {{ metadata.agent_name | default('dsk-log-agent') }} + cluster_id: {{ metadata.cluster_id | default('unknown') }} + logs: +{% for log in logs | default([{'service': 'default', 'collect': {'type': 'file', 'category': 'etc', 'file': {'paths': ['/var/log/*.log']}}}]) %} + - service: {{ log.service | default('default') }} + tag: 
+{% for tag in log.tag | default([]) %} + - {{ tag }} +{% endfor %} + keyword: +{% for keyword in log.keyword | default([]) %} + - {{ keyword }} +{% endfor %} + multiline: + format: {{ log.multiline.format | default('') }} + pattern: +{% for pattern in log.multiline.pattern | default([]) %} + - {{ pattern }} +{% endfor %} + masking: +{% for mask in log.masking | default([]) %} + - pattern: {{ mask.pattern }} + replace: {{ mask.replace }} +{% endfor %} + collect: +{% if log.collect.type == "file" %} + type: {{ log.collect.type | default('file') }} + category: {{ log.collect.category | default('etc') }} + address: {{ log.collect.address | default('') }} + file: + paths: +{% for path in log.collect.file.paths %} + - {{ path }} +{% endfor %} + exclude_paths: +{% for exclude_path in log.collect.file.exclude_paths | default([]) %} + - {{ exclude_path }} +{% endfor %} +{% endif %} +{% endfor %} \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2_bak b/ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2_bak new file mode 100644 index 0000000..cc55c92 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/log-agent-config.yml.j2_bak @@ -0,0 +1,37 @@ +agent: + metadata: + agent_name: "{{ log_agent_name | default('dsk-log-agent') }}" + cluster_id: "{{ log_agent_cluster_id | default('unknown') }}" + environment: "{{ log_agent_environment | default('etc') }}" + collect: + - paths: + {% if paths is defined and paths | length > 0 %} + {% for path in paths %} + - "{{ path | default('') }}" + {% endfor %} + {% else %} + - /var/log/*/*.log + {% endif %} + + {% if exclude_paths is defined and exclude_paths | length > 0 %} + exclude_paths: + {% for exclude_path in exclude_paths %} + - "{{ exclude_path | default('') }}" + {% endfor %} + {% else %} + exclude_paths: [] + {% endif %} + + {% if keywords is defined and keywords | length > 0 %} + keywords: + {% for keyword in keywords %} + - "{{ 
keyword | default('') }}" + {% endfor %} + {% else %} + keywords: [] + {% endif %} + tag: "{{ log_agent_tag | default('sample') }}" + service: + name: "{{ log_agent_service_name | default('test') }}" + category: "{{ log_agent_service_category | default('etc') }}" + type: "{{ log_agent_service_type | default('etc') }}" \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/node-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/node-agent-config.yml.j2 new file mode 100644 index 0000000..d00e55d --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/node-agent-config.yml.j2 @@ -0,0 +1,18 @@ +agent: + metadata: + # agent_name: my-dsk-node-agent + # cluster_id: my-cluster + option: + exporter_config: + command: "dsk-node-exporter" + port: 19110 + args: + - --collector.filesystem.ignored-mount-points="^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)" + - --collector.tcpstat + scrape_interval: 15s + scrape_timeout: 5s + scrape_configs: + - job_name: dsk-node-agent + url: localhost:19110 + filtering_configs: + rule: drop \ No newline at end of file diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/plan-postgres-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/plan-postgres-agent-config.yml.j2 new file mode 100644 index 0000000..29ddb00 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/plan-postgres-agent-config.yml.j2 @@ -0,0 +1,17 @@ +agent: + metadata: + agent_name: "{{ plan_postgres_agent_name | default('dsk-plan-postgres-agent') }}" + data_source_name: + user: "{{ plan_postgres_user_name | default('') }}" + password: "{{ plan_postgres_user_password | default('') }}" + address: "{{ plan_postgres_database_address | default('') }}" + port: "{{ plan_postgres_database_port | default('') }}" + DBName: "{{ plan_postgres_database_name | default('') }}" + explain: + scrape_interval: "{{ plan_postgres_scrape_interval | default('30s') }}" 
+ scrape_timeout: "{{ plan_postgres_scrape_timeout | default('5s') }}" + slow_query_standard: "{{ plan_postgres_slow_query_standard | default('5s') }}" + executor_number: "{{ plan_postgres_executor_number | default('10') }}" + sender_number: "{{ plan_postgres_sender_number | default('10') }}" + activity_query_buffer: "{{ plan_postgres_activity_query_buffer | default('50') }}" + plan_sender_buffer: "{{ plan_postgres_plan_sender_buffer | default('50') }}" diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/postgres-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/postgres-agent-config.yml.j2 new file mode 100644 index 0000000..cd5fb32 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/postgres-agent-config.yml.j2 @@ -0,0 +1,19 @@ +agent: + metadata: + agent_name: "{{ postgres_agent_name | default('dsk-postgres-agent') }}" + option: + exporter_config: + command: "/usr/bin/dsk-postgres-exporter" + port: 19187 + args: + - --extend.query-path=/etc/datasaker/dsk-postgres-agent/queries.yaml + - --data-source-user="{{ postgres_user_name | default('') }}" + - --data-source-pass="{{ postgres_user_password | default('') }}" + - --data-source-uri="{{ postgres_database_address | default('') }}":"{{ postgres_database_port | default('') }}" + scrape_interval: 15s + scrape_timeout: 5s + scrape_configs: + - job_name: dsk-postgres-agent + url: localhost:19187 + filtering_configs: + rule: drop diff --git a/ansible/01_old/roles/dsk_bot.datasaker/templates/trace-agent-config.yml.j2 b/ansible/01_old/roles/dsk_bot.datasaker/templates/trace-agent-config.yml.j2 new file mode 100644 index 0000000..c468258 --- /dev/null +++ b/ansible/01_old/roles/dsk_bot.datasaker/templates/trace-agent-config.yml.j2 @@ -0,0 +1,7 @@ +agent: + metadata: + agent_name: "{{ trace_agent_name | default('trace-agent') }}" + cluster_id: "{{ trace_agent_cluster_id | default('unknown_cluster') }}" + option: + collector_config: + sampling_rate: {{ trace_sampling_rate | 
default(10) | int }} diff --git a/ansible/01_old/roles/kubernetes_install/README.md b/ansible/01_old/roles/kubernetes_install/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/01_old/roles/kubernetes_install/defaults/main.yml b/ansible/01_old/roles/kubernetes_install/defaults/main.yml new file mode 100644 index 0000000..55b8a06 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/defaults/main.yml @@ -0,0 +1,140 @@ +helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f +helm_version: v3.10.2 + +kubernetes_version: 1.25.2 + +kubernetes_kubelet_extra_args: "" +kubernetes_kubeadm_init_extra_opts: "" +kubernetes_join_command_extra_opts: "" + +kubernetes_pod_network: + cni: 'calico' + cidr: '10.96.0.0/12' + +kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml + +kubernetes_metric_server_file: https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + +containerd_config: + version: 2 + root: /var/lib/containerd + state: /run/containerd + plugin_dir: "" + disabled_plugins: [] + required_plugins: [] + oom_score: 0 + grpc: + address: /run/containerd/containerd.sock + tcp_address: "" + tcp_tls_cert: "" + tcp_tls_key: "" + uid: 0 + gid: 0 + max_recv_message_size: 16777216 + max_send_message_size: 16777216 + ttrpc: + address: "" + uid: 0 + gid: 0 + debug: + address: "" + uid: 0 + gid: 0 + level: "" + metrics: + address: "" + grpc_histogram: false + cgroup: + path: "" + timeouts: + "io.containerd.timeout.shim.cleanup": 5s + "io.containerd.timeout.shim.load": 5s + "io.containerd.timeout.shim.shutdown": 3s + "io.containerd.timeout.task.state": 2s + plugins: + "io.containerd.gc.v1.scheduler": + pause_threshold: 0.02 + deletion_threshold: 0 + mutation_threshold: 100 + schedule_delay: 0s + startup_delay: 100ms + "io.containerd.grpc.v1.cri": + disable_tcp_service: true + stream_server_address: 127.0.0.1 + stream_server_port: "0" + stream_idle_timeout: 4h0m0s + enable_selinux: false + sandbox_image: k8s.gcr.io/pause:3.1 + stats_collect_period: 10 + systemd_cgroup: false + enable_tls_streaming: false + max_container_log_line_size: 
16384 + disable_cgroup: false + disable_apparmor: false + restrict_oom_score_adj: false + max_concurrent_downloads: 3 + disable_proc_mount: false + containerd: + snapshotter: overlayfs + default_runtime_name: runc + no_pivot: false + default_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + untrusted_workload_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + runtimes: + runc: + runtime_type: io.containerd.runc.v1 + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + cni: + bin_dir: /opt/cni/bin + conf_dir: /etc/cni/net.d + max_conf_num: 1 + conf_template: "" + registry: + configs: + "10.10.31.243:5000": + tls: + insecure_skip_verify: true + mirrors: + "docker.io": + endpoint: + - https://registry-1.docker.io + "10.10.31.243:5000": + endpoint: + - http://10.10.31.243:5000 + x509_key_pair_streaming: + tls_cert_file: "" + tls_key_file: "" + "io.containerd.internal.v1.opt": + path: /opt/containerd + "io.containerd.internal.v1.restart": + interval: 10s + "io.containerd.metadata.v1.bolt": + content_sharing_policy: shared + "io.containerd.monitor.v1.cgroups": + no_prometheus: false + "io.containerd.runtime.v1.linux": + shim: containerd-shim + runtime: runc + runtime_root: "" + no_shim: false + shim_debug: false + "io.containerd.runtime.v2.task": + platforms: + - linux/amd64 + "io.containerd.service.v1.diff-service": + default: + - walking + "io.containerd.snapshotter.v1.devmapper": + root_path: "" + pool_name: "" + base_image_size: "" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/.helmignore b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md new file mode 100644 index 0000000..27a52e8 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/CHANGELOG.md @@ -0,0 +1,445 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). + +### 4.2.1 + +- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1 +- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + +### 4.2.0 + +- Support for Kubernetes v1.19.0 was removed +- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0" +- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name" +- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1" +- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16" +- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process" +- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix" +- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check" +- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16" +- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) 
Bump github.com/stretchr/testify from 1.7.5 to 1.8.0" +- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process" +- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable" +- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15" +- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2" +- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format" +- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API" +- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide" +- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs" +- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14" +- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0" +- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5" +- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement" +- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver" +- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step" +- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha" +- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path" +- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases" +- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds" +- 
"[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps" +- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)" +- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard" +- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0" +- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2" +- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos" +- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it" +- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0" +- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0" +- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3" +- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1" + +### 4.1.2 + +- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed" +- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter" +- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart" + +### 4.1.0 + +- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script" +- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6" +- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod" +- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for 
redirectScheme with use-forward-headers when X-Forwarded-Proto is empty" +- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector" +- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies" +- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md" +- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing" +- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist" +- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one" +- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement" +- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation" +- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0" + +### 4.0.18 + +- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." 
+- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +- 
"[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations" +- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1 +- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6 +- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs +- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors +- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release +- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch +- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart +- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump 
google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation +- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045) +- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues +- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543 +- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name +- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners +- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option +- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags +- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page +- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation +- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations +- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs +- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml +- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide +- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile +- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use 
k8s-staging-test-infra/gcb-docker-gcloud +- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement +- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation. +- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs +- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server +- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog +- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition + +### 4.0.14 + +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. 
+ +### 4.0.10 + +- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0 + +### 4.0.9 + +- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources + +### 4.0.7 + +- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx +- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx +- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode +- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1 + +### 3.32.0 + +- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA + +### 3.31.0 + +- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] 
[#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + +### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. 
+ +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix 
controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 + +### 3.3.1 + +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update 
chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls 
because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts* checking the tag diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/Chart.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/Chart.yaml new file mode 100644 index 0000000..55c0b54 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + artifacthub.io/changes: | + - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + - "fix permissions about configmap" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.3.1 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.20.0-0' +maintainers: +- name: rikatz +- name: strongjz +- name: tao12345666333 +name: ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 4.2.5 diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/OWNERS b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md 
new file mode 100644 index 0000000..4e6a696 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md @@ -0,0 +1,494 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square) + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx).
As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. 
"release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
+**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +## Requirements + +Kubernetes: `>=1.20.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | +| 
controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | | +| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | +| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| 
controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. 
| +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. | +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. 
| +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. | +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.chroot | bool | `false` | | +| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | | +| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"registry.k8s.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.3.1"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. 
Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds 
from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # | +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| 
controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| +| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. 
An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
# Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"registry.k8s.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security 
Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. 
This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param | +| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| podSecurityPolicy.enabled | bool | `false` | | +| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration | +| rbac.create | bool | `true` | | +| rbac.scope | bool | `false` | | +| revisionHistoryLimit | int | `10` | Rollback limit # | +| serviceAccount.annotations | object | `{}` | Annotations for the controller service account | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | +| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | + diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl new file mode 100644 index 0000000..8959961 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/README.md.gotmpl @@ -0,0 +1,235 @@ +{{ template "chart.header" . }} +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . 
}} + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. 
For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. 
+ +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . 
}} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..4393a5b --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,14 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..1d94be2 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,22 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..f299dbf --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ab7d47b --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-headers-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..0a200a7 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml 
b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..3b7aa2f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..0b55306 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,17 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..acd86a7 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,20 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..90b0f57 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..25ee64d --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..380c8b4 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/daemonset-tcp-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git 
a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..82fa23e --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-default-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..cb3cb54 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-metrics-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..8026a63 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..fccdb13 --- /dev/null +++ 
b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..54d364d --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deamonset-webhook-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..b8b3ac6 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller 
+ tag: 1.0.0-dev + digest: null + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..1749418 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customconfig-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..a564eaf --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,20 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml new file mode 100644 index 0000000..9f46b4e --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-default-values.yaml @@ -0,0 +1,8 @@ +# Left blank to test default values +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP diff 
--git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..ec59235 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..17a11ac --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-headers-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml new file mode 100644 index 0000000..fd8df8d --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml 
b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9209ad5 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-metrics-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..cd9b323 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-nodeport-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b48d93c --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-podannotations-values.yaml @@ -0,0 +1,16 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..2f332a7 --- /dev/null +++ 
b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-psp-values.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..c51a4e9 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,19 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..56323c5 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,17 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..5b45b69 --- /dev/null +++ 
b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,15 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..ac0b6e6 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-tcp-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..6195bb3 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml new file mode 100644 index 0000000..95487b0 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml @@ -0,0 +1,12 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + 
extraEnvs: + - name: FOO + value: foo + - name: TEST + value: test + patch: + enabled: true diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..76669a5 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/ci/deployment-webhook-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/override-values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/override-values.yaml new file mode 100644 index 0000000..e190f03 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/override-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + + service: + type: LoadBalancer + nodePorts: + http: "30000" + https: "30001" + tcp: {} + udp: {} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp.yaml new file mode 
100644 index 0000000..2b28787 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp.yaml @@ -0,0 +1,724 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - 
ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # 
TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: 
https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: 
"registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We 
don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + 
helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: 
ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp2.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp2.yaml new file mode 100644 index 0000000..9ef52fc --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/temp2.yaml @@ -0,0 +1,725 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default 
+automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + 
app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - 
coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: 
release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 30000 + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 30001 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - 
--validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: 
ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": 
pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx 
+ app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: 
"registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt new file mode 100644 index 0000000..8985c56 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/NOTES.txt @@ -0,0 +1,80 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . 
}}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." 
+{{- end }} + +An example Ingress that makes use of the controller: + +{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}} + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: example + namespace: foo + {{- if eq $isV1 false }} + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + {{- end }} + spec: + {{- if $isV1 }} + ingressClassName: {{ .Values.controller.ingressClassResource.name }} + {{- end }} + rules: + - host: www.example.com + http: + paths: + - pathType: Prefix + backend: + service: + name: exampleService + port: + number: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. ##### +################################################################################# +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext. +*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. 
+*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . 
}}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} +{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null 
+++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..7558e0b --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . 
}}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..0528215 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . 
}}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml new file mode 100644 index 0000000..0e725ec --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) 
}} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml new file mode 100644 index 0000000..80c268f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: {{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + 
{{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml new file mode 100644 index 0000000..5ad1867 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-deployment.yaml @@ -0,0 +1,228 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml 
.Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml new file mode 100644 index 0000000..9492784 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-ingressclass.yaml @@ -0,0 +1,21 @@ +{{- if .Values.controller.ingressClassResource.enabled -}} +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ .Values.controller.ingressClassResource.name }} +{{- if .Values.controller.ingressClassResource.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +{{- end }} +spec: + controller: {{ .Values.controller.ingressClassResource.controllerValue }} + {{ template "ingressClass.parameters" . }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml new file mode 100644 index 0000000..875157e --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-keda.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +# https://keda.sh/docs/ + +apiVersion: {{ .Values.controller.keda.apiVersion }} +kind: ScaledObject +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.keda.scaledObject.annotations }} + annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: +{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }} + deploymentName: {{ include "ingress-nginx.controller.fullname" . }} +{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} +{{- end }} + pollingInterval: {{ .Values.controller.keda.pollingInterval }} + cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} + minReplicaCount: {{ .Values.controller.keda.minReplicas }} + maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} + triggers: +{{- with .Values.controller.keda.triggers }} +{{ toYaml . | indent 2 }} +{{ end }} + advanced: + restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }} +{{- if .Values.controller.keda.behavior }} + horizontalPodAutoscalerConfig: + behavior: +{{ with .Values.controller.keda.behavior -}} +{{ toYaml . | indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..78b5362 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . 
}} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml new file mode 100644 index 0000000..2e0499c --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-psp.yaml @@ -0,0 +1,94 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml new file mode 100644 index 0000000..330be8c --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-role.yaml @@ -0,0 +1,113 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.controller.electionID 
}} + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if 
$.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..1c1d5bd --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http-metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: http-metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + 
selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + 
appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..824b2a1 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..973d36b --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{- else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . }} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml new file mode 100644 index 0000000..f74c2fb --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }} + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow + namespace: {{ .Release.Namespace }} +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} + policyTypes: + - Ingress + +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . 
}}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml new file mode 100644 index 0000000..c144c8f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-psp.yaml @@ -0,0 +1,38 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . 
}}-backend] + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml new file mode 100644 index 0000000..12e7a4f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/templates/dh-param-secret.yaml @@ -0,0 +1,10 @@ +{{- with .Values.dhParam -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ingress-nginx.controller.fullname" $ }} + labels: + {{- include "ingress-nginx.labels" $ | nindent 4 }} +data: + dhparam.pem: {{ . }} +{{- end }} diff --git a/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/values.yaml b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/values.yaml new file mode 100644 index 0000000..9ec174f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/ingress-nginx/values.yaml @@ -0,0 +1,944 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + image: + ## Keep false as default for now! 
+ chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.3.1" + digest: sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974 + digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1 + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + # -- Configures the controller container name + containerName: controller + + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + + # -- Optionally customize the pod hostname. + hostname: {} + + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: true + + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + + # -- Election ID to use for status update + electionID: ingress-controller-leader + + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + + # -- For backwards compatibility with ingress.class annotation, use ingressClass. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security Context policies for controller pods + podSecurityContext: {} + + # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. 
+ publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. + namespaceSelector: "" + + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # -- Additional command line arguments to pass to nginx-ingress-controller + # E.g. 
to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod 
anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 
10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. + ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + 
minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were + # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. 
+ # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. 
This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. 
+ extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available 
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. + ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # 
annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the Configmap feature + # worker-shutdown-timeout new value is 240s instead of 10s. + ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 
5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A 
base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/ansible/01_old/roles/kubernetes_install/files/kubeconfig b/ansible/01_old/roles/kubernetes_install/files/kubeconfig new file mode 100644 index 0000000..95b048c --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/files/kubeconfig @@ -0,0 +1,20 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXdOakV3TkRZME5Gb1hEVE15TVRJd016RXdORFkwTkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT0xYCnhYSWpmVWdFdXdxMFZrYXQ0N2d6Q09vRERDL1YvRHl5T0dWZG1PSDEwd1dHZmwrMFI0dkkwaWlHc0ZKRVFqWWMKRGt6THZUOHlRNVpybFpiV3VuVkFYaTlrcHRxY0d2R2NZYXdrMGVIRVdqQ1RRTXo0T2dOd2JuQytKdkd1MnJiVgpaUVVvRzV1cHpDRkM0Q0RtM1h2Ym14RjdYUU11L0FOU045V1UwYS9Td0tvR25EaVMyV1JBOFJpRG9DYURKcGprCjc1azAyMkZOanlOZkxadktmMzFzNTg3OGF1bS9WS0UzeVV4SGhqVzFEeEpTWUMrK1RhV0F0MWpBVXZObWVNQmQKVStmRW0xbUVnNWtnWVBER2d3czBSVHB4WFk1U3dTL3hKVkd2VjRYeExlWEVoWmc2ZlF4c0hvcG1vdVZJOEJLMQpUdW1ram4zTkhrU0dtRVEzczBFQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNc2RSdjJzQ2RaUW1ZM1dVZ1U4aUlhOHM5cWpNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRXZ5WWorT2QwVSttYkdkSzJyWApTcXdiZjY0M0V4YWlkVDVWQjhPUExoNm1kbVQvTk04cHdYUDg0Y0J2RC9rSzBXTllObkc2RFhTS3BtWjBSM1BmCm03ck8wZ2N4M1dYaDIzWHk5VmJWcGcyK0FXdFRRVDd5dVM3L3U3YjVnV1lQNzVqNk0rOWQvdXNtc3g3alFxNjEKa3NJTzlKNXVmNCtXSHJtQm1WMGNydGZTTjN5bTF2Q2VDZzNNeFpkY3hIUXgrUjNNWFZyYVZFK1NIUE5hdzgzQQppZFI3eDkyenMwY3dRcy9wSGxRK0t5aDFnQ01JQkVFTXM1OERwZU11Q0VUaEZ2elBuL1l6YlVoNUYrcm1KNytoCjJ5VVV0R2x5V3NJQ2l5ODl0Y2pOT1RBN2EvSGJ5ZDk4MmxIM1hLY1ZlUUdYVlJFTFF0UU9sUVNHOVRQU1R4OFcKNWdJPQotL
S0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://10.10.30.214:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJYzVmZFNYMzFJTWd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFeU1EWXhNRFEyTkRSYUZ3MHlNekV5TURZeE1EUTJORGRhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTFBRGVDUGF5elpXTWtpV1AKQzV0RmRzL3orSmpndDYzUzFxMC9TQ2VYTWx0MGRTVGFqb3B5UzRueFVPb1ZXYmY2M016NEQ0RzRDWDNheW1TWgpLa1JmbVJ5SEtNRTVWTll6WUNWSUY1Qm1acEpVUHJ6OU5vZVRocDlQOTV2M3hIWlNBektvKysyZnQ1dzJ1dGJYCnBkN3g0Nk5KSnBPb1I5M2ZxOFFZRkhHOEhhSEd6WjhJbS9CV21KVEJua2R2aGJxczB2R3ZmbkdXRGhFT05wd2IKd2w4Z0kwMFpqTHhBRkZxY1d1MVZtNmJZYXUzVDlrSGdEcEplNUVrSFVFZ2cwWWlOTlZlUzJHdGFaeUpiWUcxZQo4Um9Zd3k0cFlvbzVlMW1sZDJHWE1EUWhkQk1oWGk5R1NsSC9GaWlqN24xTVZYTFpCSk9UL0lSdElQR1dIdmtXCmxVcEQ4UUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUTEhVYjlyQW5XVUptTjFsSUZQSWlHdkxQYQpvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTTFLbEwyL3BwU1pCTGpTd3NWZzhGYTlkWk05RDIzNDNJdzY3Ci9QN1JWKzRjSVJqSXlJZFZGMUgrbkl2bFl0cncrMkZDTHpQYWNYcTZUWi9IUktFVEZtb1FYRnhnVk5GRDZVZXAKTW1sd2FCK1BnUmFRWDZoZjJlRUNXaXBobXpLRitsQnRtNENpci9USlZwTitQQ1Jhc0VkVW9pVUVtUHVsRXYvUQp1bDg3Q3RUbVAzajkxRi94eWFrYmhSS2pPbUY0dFRlZ1E4ZUE2SkFYV0d6S09XMzdZR1BHc091ZXlzU3hzanp0CjdpRjczVGVKdmdtbUllWHkrMkdoTFdIQ0tHVGJUek1UZnpYV0J4ODBXbzVjdFNaWmYzT0NJWWRHM1ViY0lsdXMKR0JreXM4b0phSXFHSE93NUpCRE1PNzliajQrbU01MlJGNmtQbkJDL2duckUyQUdHM3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMUFEZUNQYXl6WldNa2lXUEM1dEZkcy96K0pqZ3Q2M1MxcTAvU0NlWE1sdDBkU1RhCmpvcHlTNG54VU9vVldiZjYzTXo0RDRHNENYM2F5bVNaS2tSZm1SeUhLTUU1Vk5ZellDVklGNUJtWnBKVVByejkKTm9lVGhwOVA5NXYzeEhaU0F6S28rKzJmdDV3MnV0YlhwZDd4NDZOSkpwT29SOTNmcThRWUZIRzhIYUhHelo4SQptL0JXbUpUQm5rZHZoYnFzMHZHdmZuR1dEaEVPTnB3YndsOGdJMDBaakx4QUZGcWNXdTFWbTZiWWF1M1Q5a0hnCkRwSmU1RWtIVUVnZzBZaU5OVmVTMkd0YVp5SmJZRzFlOFJvWXd5NHBZb281ZTFtbGQyR1hNRFFoZEJNaFhpOUcKU2xIL0ZpaWo3bjFNVlhMWkJKT1QvSVJ0SVBHV0h2a1dsVXBEOFFJREFRQUJBb0lCQUZsT0VEb29hY091WnF1OQp4SmM0RGpmeGU2MVNBUDkrNnB6aUdCRTJGRHZ6U0loOFFORGd3eXJNN2VtTzRmV01TZEd2U2lPR0dsZHRPN2djClRtVCtybUthSU5sckk5SjM5T1pnYmhEM0ZCdkxNay9IWHNjVXIzRjdOTDF5WnhuTVdkbmRBbEExbGgxTFljYXMKNytTQW1OYXlsd0w0R21CRHQ0L3NwOVFjNFFoOXRDQXdTMGUvb1k1cnl4QzFBb25zNUFIMmJGNGg3SGRMM0tvWApHMjRjTm9MY2d0K0J4M00wYSs2aGFoSmV6aGhVL2R5L1dRdjlJay9VRlJra3dBdkxvM3VPUVA1bzB0RVpXcXF1CkhUM3VRLzM4bTBwOGE1U21WR250RTNGWERQRlJGUi9aWXBjME1FMHRPYzZ0U3BmUHQzajRqUDBubjRGMStXdEQKTWhBSlNIMENnWUVBNHNzTTJ3Y1lKaUZhdTFCRzd4cXJkVGMrTWl4L3pWWWZtZHRhemtUcEsvRFF3ZmdRWVREbgpWbWRwNW5MRTdyeTRaMU90RGg3d0pKTVMySHRvRW9sTk9YMjVMKzNBaFgzRFUrU1lNeDJ2VU1jcUVzak1hUHFDCjFvM0dxa2JiNmozS2RZck9jWDhFUXVBWlhmc0RTSWxUOHNxcFdRcGFEZE9RcE15Y05WYzNjTDhDZ1lFQTcwNDUKd1d1dXN2UitGWUgrdW12bU9Ndm9VdmNROXJFcnZGcEJkdndiQnJsWHgvWFBNNE9kYXZXV1hQb3hxbWpUeExzcApnV000V0lVUUN6RjZGb2V5Kzc0T2ZmTmJtZG8renNVSkx5bnBPb3AyWUNUWk8vQ1NCNTFXNHN6UTlQaTErak9xCkdqdjVCK1RqV3o0cEdKWWp2eGpXZ2Nha1FDZEgzRHVTeFlYMngwOENnWUJaT0RRb2ZsUDd2Q2RyaFJ0Q3VTVTIKaWJNSUhnVnhEQzZHWW9zSWxvZDhaOUpZWEhSbEo4MzZhZGg1ZGpFUEVtTWhFd1FEaUJ4RTV5OEV4eGVjSXpPawpLRmVRQ1dJeG9kWVR6TndyVDhSR2JQT2FUREJPSkM4UXBObkE1dnRnM1VvbWo2TERkNHAvbkpXZUtUK1RhNk1BCjRzVllhQUFoYkZkODNabWVTbDlmRlFLQmdRQ3RURlQvQVdCT01FaHVndWxaVDNJMWgxVURYL0JrOWdEYU1mSmUKbkV0bUh5cTJvQWdoSWhzSnJqZnB0VFhxVm1lbGZIU2VRcUEzV29VMzFlaTRFQ1ZKc1dVRlNRcjQ2OWU0SFhCOQpPeml2TUQ1eGViM25ibHdTTDVzUU80ckhIS1dNUDRYYjRicUNRUHQweEJzMnR1UEVLOVNMdnJLTDB1WnpVcUVECmNmUTRlUUtCZ1FDV0xtUEJwUDVMVFA0YmxRVDFMMy9LQ0cweGxz
L2pFL1JBVlE5VmY1UVVPeFlUaHBlZTUzdXAKUmwxOEVuSks1THpuNlF2TzRMeXRGdVNLeG5aRnYxY1pycUNKR0owcXhNc3VpTGVtbnYxb2NGMDdiWkYwV0pzWQowak9Ha0oweXZtUlFBVzhQUEhJeGdzN3Y3ekNzZHdTcmx3REEvYnNxa1BUVEJxNlpSUHhSUXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + diff --git a/ansible/01_old/roles/kubernetes_install/handlers/main.yml b/ansible/01_old/roles/kubernetes_install/handlers/main.yml new file mode 100644 index 0000000..4bf601f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Reload systemd configuration + service: + daemon_reload: True + +- name: Restart containerd service + service: + name: containerd + enabled: true + state: restarted diff --git a/ansible/01_old/roles/kubernetes_install/meta/main.yml b/ansible/01_old/roles/kubernetes_install/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/01_old/roles/kubernetes_install/tasks/helm-chart-nginx.yml b/ansible/01_old/roles/kubernetes_install/tasks/helm-chart-nginx.yml new file mode 100644 index 0000000..3fd6896 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/helm-chart-nginx.yml @@ -0,0 +1,13 @@ +--- +- name: Create Nginx Ingress Controller deployment + kubernetes.core.helm: + kubeconfig: "{{ role_path }}/files/kubeconfig" + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{item}}" + chart_ref: "{{ role_path }}/files/{{item}}" + create_namespace: yes + release_state: present + with_items: + - nginx-ingress + diff --git a/ansible/01_old/roles/kubernetes_install/tasks/helm-install.yml b/ansible/01_old/roles/kubernetes_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package 
+ unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/01_old/roles/kubernetes_install/tasks/k8s-helm-chart.yml b/ansible/01_old/roles/kubernetes_install/tasks/k8s-helm-chart.yml new file mode 100644 index 0000000..6a4e308 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/k8s-helm-chart.yml @@ -0,0 +1,7 @@ +--- +# Set up master. 
+- include_tasks: helm-install.yml + when: kubernetes_role == 'master' + +- include_tasks: helm-chart-nginx.yml + when: kubernetes_role == 'master' diff --git a/ansible/01_old/roles/kubernetes_install/tasks/k8s-main.yml b/ansible/01_old/roles/kubernetes_install/tasks/k8s-main.yml new file mode 100644 index 0000000..785048f --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/k8s-main.yml @@ -0,0 +1,68 @@ +--- +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: "{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + +- name: Enable kubelet service + systemd: + name: kubelet + enabled: true + masked: false + +- name: Check if Kubernetes has already been initialized. + stat: + path: /etc/kubernetes/admin.conf + register: kubernetes_init_stat + + + +# Set up master. +- include_tasks: k8s-master.yml + when: kubernetes_role == 'master' + +# Set up nodes. +- name: Get the kubeadm join command from the Kubernetes master. + command: kubeadm token create --print-join-command + changed_when: false + when: kubernetes_role == 'master' + register: kubernetes_join_command_result + +- name: Set the kubeadm join command globally. 
+ set_fact: + kubernetes_join_command: > + {{ kubernetes_join_command_result.stdout }} + {{ kubernetes_join_command_extra_opts }} + when: kubernetes_join_command_result.stdout is defined + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + +- include_tasks: k8s-node.yml + when: kubernetes_role == 'node' + diff --git a/ansible/01_old/roles/kubernetes_install/tasks/k8s-master.yml b/ansible/01_old/roles/kubernetes_install/tasks/k8s-master.yml new file mode 100644 index 0000000..dc257cb --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/k8s-master.yml @@ -0,0 +1,56 @@ +--- +- name: Initialize Kubernetes master with kubeadm init. + command: > + kubeadm init + --pod-network-cidr={{ kubernetes_pod_network.cidr }} + --apiserver-advertise-address={{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }} + {{ kubernetes_kubeadm_init_extra_opts }} + register: kubeadmin_init + when: not kubernetes_init_stat.stat.exists + +- name: Print the init output to screen. + debug: + var: kubeadmin_init.stdout + verbosity: 2 + when: not kubernetes_init_stat.stat.exists + +- name: Ensure .kube directory exists. + file: + path: ~/.kube + state: directory + +- name: Symlink the kubectl admin.conf to ~/.kube/conf. + file: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/config + state: link + +- name: Configure Calico networking. 
+ command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_calico_manifest_file }} + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' + +- name: Configure Metric Server + command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_metric_server_file }} + register: metric_server_result + changed_when: "'created' in metric_server_result.stdout" + +- name: Kubectl Cheat Sheet + lineinfile: + path: ~/.bashrc + line: "{{ item }}" + with_items: + - echo "source <(kubectl completion bash)" + - alias k=kubectl + - complete -o default -F __start_kubectl k + +- name: Get kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/data/ansible_config + flat: yes diff --git a/ansible/01_old/roles/kubernetes_install/tasks/k8s-node.yml b/ansible/01_old/roles/kubernetes_install/tasks/k8s-node.yml new file mode 100644 index 0000000..304cbf1 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/k8s-node.yml @@ -0,0 +1,6 @@ +--- +- name: Join node to Kubernetes master + shell: > + {{ kubernetes_join_command }} + creates=/etc/kubernetes/kubelet.conf + tags: ['skip_ansible_lint'] diff --git a/ansible/01_old/roles/kubernetes_install/tasks/main.yml b/ansible/01_old/roles/kubernetes_install/tasks/main.yml new file mode 100644 index 0000000..d8978ed --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- include: os-main.yml + tags: os-main + +- include: os-runtime.yml + tags: os-runtime + +- include: k8s-main.yml + tags: k8s-main + diff --git a/ansible/01_old/roles/kubernetes_install/tasks/os-main.yml b/ansible/01_old/roles/kubernetes_install/tasks/os-main.yml new file mode 100644 index 0000000..ffadc28 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/os-main.yml @@ -0,0 +1,70 @@ +--- +- name: Update and upgrade apt packages + apt: + upgrade: yes + update_cache: yes + force_apt_get: yes + cache_valid_time: 
86400 + +- name: Install apt packages + apt: + name: ['cloud-utils', 'apt-transport-https', 'ca-certificates', 'curl', 'socat', 'conntrack', 'gnupg', 'lsb-release', 'bash-completion', 'chrony'] + state: present + +- name: Disable ufw + command: 'ufw disable' + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Add overlay and br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Load overlay and br_netfilter kernel modules + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Persist kubernetes networking sysctl settings + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Enable net.bridge.bridge-nf-call-iptables + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Enable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/ansible/01_old/roles/kubernetes_install/tasks/os-runtime.yml b/ansible/01_old/roles/kubernetes_install/tasks/os-runtime.yml new file mode 100644 index 0000000..60be402 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tasks/os-runtime.yml @@ -0,0 +1,45 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] 
https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: containerd_apt_repo_task + +- name: apt update + apt: + update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + diff --git a/ansible/01_old/roles/kubernetes_install/templates/config.toml.j2 b/ansible/01_old/roles/kubernetes_install/templates/config.toml.j2 new file mode 100644 index 0000000..0217565 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/templates/config.toml.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% from 'yaml2toml_macro.j2' import yaml2toml with context -%} + +{{ yaml2toml(containerd_config) }} diff --git a/ansible/01_old/roles/kubernetes_install/templates/hosts.j2 b/ansible/01_old/roles/kubernetes_install/templates/hosts.j2 new file mode 100644 index 0000000..18804b7 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/templates/hosts.j2 @@ -0,0 +1,6 @@ +127.0.0.1 localhost +::1 localhost + +{% for host in groups.all %} +{{ hostvars[host].ansible_default_ipv4.address }} {{ hostvars[host].ansible_fqdn }} {{ hostvars[host].ansible_hostname }} +{%endfor%} diff --git a/ansible/01_old/roles/kubernetes_install/templates/yaml2toml_macro.j2 b/ansible/01_old/roles/kubernetes_install/templates/yaml2toml_macro.j2 new file mode 100644 index 0000000..33f69d0 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/templates/yaml2toml_macro.j2 @@ -0,0 +1,58 @@ +{%- macro 
yaml2inline_toml(item, depth) -%} + {%- if item is string or item is number -%} + {#- First, process all primitive types. -#} + {{ item | to_json }} + {%- elif item is mapping -%} + {#- Second, process all mappings. -#} + {#- Note that inline mappings must not contain newlines (except inside contained lists). -#} + {{ "{" }} + {%- for key, value in item.items() | sort -%} + {{ " " + + (key | to_json) + + " = " + + yaml2inline_toml(value, depth) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ " }" }} + {%- else -%} + {#- Third, process all lists. -#} + {%- if item | length == 0 -%}{{ "[]" }}{%- else -%} + {{ "[" }} + {%- for entry in item -%} + {{ "\n" + + (" " * (depth + 1)) + + yaml2inline_toml(entry, depth + 1) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ "\n" + (" " * depth) + "]" }} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro yaml2toml(item, super_keys=[]) -%} + {%- for key, value in item.items() | sort -%} + {%- if value is not mapping -%} + {#- First, process all non-mappings. -#} + {{ (" " * (super_keys | length)) + + (key | to_json) + + " = " + + (yaml2inline_toml(value, super_keys | length)) + + "\n" + }} + {%- endif -%} + {%- endfor -%} + {%- for key, value in item.items() | sort -%} + {%- if value is mapping -%} + {#- Second, process all mappings. 
-#} + {{ "\n" + + (" " * (super_keys | length)) + + "[" + + ((super_keys+[key]) | map('to_json') | join(".")) + + "]\n" + + yaml2toml(value, super_keys+[key]) + }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} diff --git a/ansible/01_old/roles/kubernetes_install/tests/inventory b/ansible/01_old/roles/kubernetes_install/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/01_old/roles/kubernetes_install/tests/test.yml b/ansible/01_old/roles/kubernetes_install/tests/test.yml new file mode 100644 index 0000000..191e731 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - apache diff --git a/ansible/01_old/roles/kubernetes_install/vars/main.yml b/ansible/01_old/roles/kubernetes_install/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/ansible/01_old/roles/kubernetes_install/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for apache diff --git a/ansible/01_old/roles/node/tasks/main.yml b/ansible/01_old/roles/node/tasks/main.yml new file mode 100644 index 0000000..d5b4c61 --- /dev/null +++ b/ansible/01_old/roles/node/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: echo hello + command: echo "hello" diff --git a/ansible/01_old/roles/node/templates/common-auth.j2 b/ansible/01_old/roles/node/templates/common-auth.j2 new file mode 100755 index 0000000..64a603b --- /dev/null +++ b/ansible/01_old/roles/node/templates/common-auth.j2 @@ -0,0 +1,27 @@ +# +# /etc/pam.d/common-auth - authentication settings common to all services +# +# This file is included from other service-specific PAM config files, +# and should contain a list of the authentication modules that define +# the central authentication scheme for use on the system +# (e.g., /etc/shadow, LDAP, Kerberos, etc.). 
The default is to use the +# traditional Unix authentication mechanisms. +# +# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. +# To take advantage of this, it is recommended that you configure any +# local modules either before or after the default block, and use +# pam-auth-update to manage selection of other modules. See +# pam-auth-update(8) for details. +auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}} + +# here are the per-package modules (the "Primary" block) +auth [success=1 default=ignore] pam_unix.so nullok +# here's the fallback if no module succeeds +auth requisite pam_deny.so +# prime the stack with a positive return value if there isn't one already; +# this avoids us returning an error just because nothing sets a success code +auth required pam_permit.so +# since the modules above will each just jump around +# and here are more per-package modules (the "Additional" block) +auth optional pam_cap.so +# end of pam-auth-update config diff --git a/ansible/01_old/roles/node/templates/pwquality.conf.j2 b/ansible/01_old/roles/node/templates/pwquality.conf.j2 new file mode 100755 index 0000000..3ec2cbe --- /dev/null +++ b/ansible/01_old/roles/node/templates/pwquality.conf.j2 @@ -0,0 +1,50 @@ +# Configuration for systemwide password quality limits +# Defaults: +# +# Number of characters in the new password that must not be present in the +# old password. +# difok = 5 +# +# Minimum acceptable size for the new password (plus one if +# credits are not disabled which is the default). (See pam_cracklib manual.) +# Cannot be set to lower value than 6. +minlen = {{pwquality_minlen}} +# +# The maximum credit for having digits in the new password. If less than 0 +# it is the minimum number of digits in the new password. +dcredit = {{pwquality_dcredit}} +# +# The maximum credit for having uppercase characters in the new password. 
+# If less than 0 it is the minimum number of uppercase characters in the new +# password. +ucredit = {{pwquality_ucredit}} +# +# The maximum credit for having lowercase characters in the new password. +# If less than 0 it is the minimum number of lowercase characters in the new +# password. +lcredit = {{pwquality_lcredit}} +# +# The maximum credit for having other characters in the new password. +# If less than 0 it is the minimum number of other characters in the new +# password. +ocredit = {{pwquality_ocredit}} +# +# The minimum number of required classes of characters for the new +# password (digits, uppercase, lowercase, others). +# minclass = 0 +# +# The maximum number of allowed consecutive same characters in the new password. +# The check is disabled if the value is 0. +maxrepeat = {{pwquality_maxrepeat}} +# +# The maximum number of allowed consecutive characters of the same class in the +# new password. +# The check is disabled if the value is 0. +# maxclassrepeat = 0 +# +# Whether to check for the words from the passwd entry GECOS string of the user. +# The check is enabled if the value is not 0. +# gecoscheck = 0 +# +# Path to the cracklib dictionaries. Default is to use the cracklib default. +# dictpath = diff --git a/ansible/01_old/roles/node/templates/sysctl.j2 b/ansible/01_old/roles/node/templates/sysctl.j2 new file mode 100644 index 0000000..5f2e952 --- /dev/null +++ b/ansible/01_old/roles/node/templates/sysctl.j2 @@ -0,0 +1,79 @@ +# +# /etc/sysctl.conf - Configuration file for setting system variables +# See /etc/sysctl.d/ for additional system variables. +# See sysctl.conf (5) for information. 
+# + +#kernel.domainname = example.com + +# Uncomment the following to stop low-level messages on console +#kernel.printk = 3 4 1 3 + +################################################################### +# Functions previously found in netbase +# + +# Uncomment the next two lines to enable Spoof protection (reverse-path filter) +# Turn on Source Address Verification in all interfaces to +# prevent some spoofing attacks +#net.ipv4.conf.default.rp_filter=1 +#net.ipv4.conf.all.rp_filter=1 + +# Uncomment the next line to enable TCP/IP SYN cookies +# See http://lwn.net/Articles/277146/ +# Note: This may impact IPv6 TCP sessions too +#net.ipv4.tcp_syncookies=1 + +# Uncomment the next line to enable packet forwarding for IPv4 +#net.ipv4.ip_forward=1 + +# Uncomment the next line to enable packet forwarding for IPv6 +# Enabling this option disables Stateless Address Autoconfiguration +# based on Router Advertisements for this host +#net.ipv6.conf.all.forwarding=1 + + +################################################################### +# Additional settings - these settings can improve the network +# security of the host and prevent against some network attacks +# including spoofing attacks and man in the middle attacks through +# redirection. Some network environments, however, require that these +# settings are disabled so review and enable them as needed. 
+# +# Do not accept ICMP redirects (prevent MITM attacks) +#net.ipv4.conf.all.accept_redirects = 0 +#net.ipv6.conf.all.accept_redirects = 0 +# _or_ +# Accept ICMP redirects only for gateways listed in our default +# gateway list (enabled by default) +# net.ipv4.conf.all.secure_redirects = 1 +# +# Do not send ICMP redirects (we are not a router) +#net.ipv4.conf.all.send_redirects = 0 +# +# Do not accept IP source route packets (we are not a router) +#net.ipv4.conf.all.accept_source_route = 0 +#net.ipv6.conf.all.accept_source_route = 0 +# +# Log Martian Packets +#net.ipv4.conf.all.log_martians = 1 +# + +################################################################### +# Magic system request Key +# 0=disable, 1=enable all, >1 bitmask of sysrq functions +# See https://www.kernel.org/doc/html/latest/admin-guide/sysrq.html +# for what other values do +#kernel.sysrq=438 + +net.core.default_qdisc = fq +net.core.rmem_max = 268435456 +net.core.wmem_max = 268435456 +net.ipv4.conf.all.arp_announce = 2 +net.ipv4.conf.all.arp_filter = 1 +net.ipv4.conf.all.arp_ignore = 1 +net.ipv4.conf.default.arp_filter = 1 +net.ipv4.tcp_congestion_control = htcp +net.ipv4.tcp_no_metrics_save = 1 +net.ipv4.tcp_rmem = 4096 87380 134217728 +net.ipv4.tcp_wmem = 4096 65536 134217728 diff --git a/ansible/01_old/roles/password_change/README.md b/ansible/01_old/roles/password_change/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/password_change/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/01_old/roles/password_change/defaults/main.yml b/ansible/01_old/roles/password_change/defaults/main.yml new file mode 100644 index 0000000..5415520 --- /dev/null +++ b/ansible/01_old/roles/password_change/defaults/main.yml @@ -0,0 +1,15 @@ +--- +# defaults file for password + +encrypt: 0 # strings 0 , encrypted 1 +debug_mode: False +sshrootlogin: forced-commands-only +sshmainport: 2222 +iptables_rules: + - { source: "10.10.45.0/24", target: "DROP" } + - { source: "10.10.47.0/24", target: "DROP" } + - { source: "10.10.48.0/24", target: "DROP" } + - { source: "10.10.50.0/24", target: "DROP" } + - { source: "10.10.37.0/24", target: "DROP" } +delete_rule: False +add_rule: True \ No newline at end of file diff --git a/ansible/01_old/roles/password_change/files/00_old/gen_password.py b/ansible/01_old/roles/password_change/files/00_old/gen_password.py new file mode 100644 index 0000000..b1b4e13 --- /dev/null +++ b/ansible/01_old/roles/password_change/files/00_old/gen_password.py @@ -0,0 +1,44 @@ +#!/usr/bin/python3 + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypt_flag=True if os.sys.argv[1].lower()=='1' else False +except Exception as err: + encrypt_flag=False + +def generate_password(length=8, num_uppercase=1, num_lowercase=1, num_digits=1, num_sp_char=1): + sp_char = '!@#$' + all_chars = string.ascii_letters + string.digits + sp_char + + password = [ + *random.choices(string.ascii_uppercase, k=num_uppercase), + *random.choices(string.ascii_lowercase, k=num_lowercase), + *random.choices(string.digits, k=num_digits), + *random.choices(sp_char, k=num_sp_char) + ] + + remaining_length = length - (num_uppercase + num_lowercase + num_digits + num_sp_char) + password += random.choices(all_chars, k=remaining_length) + + random.shuffle(password) + return ''.join(password) + +def encrypt(plain_text, key): + manual_iv = 
b'PhilinnovatorDEV' + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + ct_bytes = cipher.encrypt(pad(plain_text.encode(), 16)) + ct = base64.b64encode(ct_bytes).decode('utf-8') + return ct + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +plain_text = generate_password() + +if encrypt_flag: + encrypted_text = encrypt(plain_text, key) + print(encrypted_text) +else: + print(plain_text) diff --git a/ansible/01_old/roles/password_change/files/00_old/vault_test.py b/ansible/01_old/roles/password_change/files/00_old/vault_test.py new file mode 100644 index 0000000..18f6988 --- /dev/null +++ b/ansible/01_old/roles/password_change/files/00_old/vault_test.py @@ -0,0 +1,11 @@ +import hvac + +str_url = "http://10.10.43.98:31080" +str_token = "hvs.CAESIMV6zCg-GpUP4pQgVA5f1ZXkgyJZrqOC6QDCegrpiAX9Gh4KHGh2cy5ORkpkc2ZyVUxYd09qUVFtQldRNDBjS3I" +client = hvac.Client(url=str_url, token=str_token) + +str_mount_point = 'kv' +str_secret_path = 'host1' +read_secret_result = client.secrets.kv.v1.read_secret(mount_point=str_mount_point, path=str_secret_path) +print(read_secret_result) + diff --git a/ansible/01_old/roles/password_change/files/custom_excel b/ansible/01_old/roles/password_change/files/custom_excel new file mode 100755 index 0000000..707736c --- /dev/null +++ b/ansible/01_old/roles/password_change/files/custom_excel @@ -0,0 +1,108 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import os, sys, time, errno, socket, signal, psutil, random, logging.handlers, subprocess, paramiko, hvac +from xlwt import Workbook, XFStyle, Borders, Font, Pattern +from socket import error as SocketError + +process_time = time.strftime("%Y%m%d_%H%M", time.localtime()) +excel_file_name = '/mnt/nas/{}.xls'.format(process_time) + +def process_close(flag=True, result=''): + if flag: + #print("[Success]") + print(excel_file_name) + else: + print("[Fail]:{}".format(result)) + + sys.exit(0) + +def set_header(sheet, header_list): + # 폰트 설정 + font = Font() + font.bold = True + + # 테두리 설정 + borders = 
Borders() + borders.left = Borders.THIN + borders.right = Borders.THIN + borders.top = Borders.THIN + borders.bottom = Borders.THIN + + # 배경색 설정 + pattern = Pattern() + pattern.pattern = Pattern.SOLID_PATTERN + pattern.pattern_fore_colour = 22 + + hdrstyle = XFStyle() + hdrstyle.font = font + hdrstyle.borders = borders + hdrstyle.pattern = pattern + + for idx, header in enumerate(header_list): + sheet.write(0, idx, header, hdrstyle) + sheet.col(idx).width = len(header) * 800 + +def write_data(sheet, data_list): + datestyle = XFStyle() + datestyle.num_format_str = 'YYYY-MM-DD' + + for row_num, data in enumerate(data_list, start=1): + for col_num, cell_data in enumerate(data): + if col_num == 7: + sheet.write(row_num, col_num, cell_data, datestyle) + elif col_num in [1, 4, 5]: + formatted_data = u'{}'.format(cell_data) if cell_data else '' + sheet.write(row_num, col_num, formatted_data) + else: + sheet.write(row_num, col_num, cell_data) + +def excel_write(header_list=[], data_list=[], filename='', sheetTitle=''): + workbook = Workbook(style_compression=2, encoding='utf-8') + sheet = workbook.add_sheet(sheetTitle) + + set_header(sheet, header_list) + write_data(sheet, data_list) + + sheet.panes_frozen = True + sheet.vert_split_pos = 0 + sheet.horz_split_pos = 1 + workbook.save(filename) + +def main(): + header_list=['번호','호스트 유형','호스트명','호스트 IP','포트번호','프로토콜','인증방법','1차 로그인 계정명','1차 로그인 비밀번호','1차 로그인 계정명','2차 로그인 비밀번호','용도','비고'] + data_list=[] + + openfile=open('/tmp/host_list','r') + readfile=openfile.readlines() + openfile.close() + for idx, host_data in enumerate(readfile): + try: + if idx==0: continue + host_num=idx + hosttype=host_data.strip().split(' ')[0] + hostname=host_data.strip().split(' ')[1] + host_ips=host_data.strip().split(' ')[2] + port_num=int(host_data.strip().split(' ')[3]) + protocol='SSH' + auth_con='Password' + username=host_data.strip().split(' ')[4] + first_pw=host_data.strip().split(' ')[5] + rootuser=host_data.strip().split(' ')[6] + 
secon_pw=host_data.strip().split(' ')[7] + descript='-' + remarks_='-' + data_list.append([host_num,hosttype,hostname,host_ips,port_num,protocol,auth_con,username,first_pw,rootuser,secon_pw,descript,remarks_,]) + except: + continue + + excel_write(header_list, data_list, excel_file_name, 'TEST') + +DEBUG=False +try: + if os.sys.argv[1]: DEBUG=True +except: + pass +main() +process_close() + diff --git a/ansible/01_old/roles/password_change/files/decrypt_password b/ansible/01_old/roles/password_change/files/decrypt_password new file mode 100755 index 0000000..5e31c71 --- /dev/null +++ b/ansible/01_old/roles/password_change/files/decrypt_password @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypted_text=os.sys.argv[1] +except: + encrypted_text="q6i1/JxyNe1OUrO0JKu+Z4WQTyQZam2yIJTp43dl1pI=" + +def decrypt(ct, key): + manual_iv = b'PhilinnovatorDEV' + ct_bytes = base64.b64decode(ct) + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + return unpad(cipher.decrypt(ct_bytes), 16).decode('utf-8') + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +print(decrypt(encrypted_text, key)) \ No newline at end of file diff --git a/ansible/01_old/roles/password_change/files/gen_password b/ansible/01_old/roles/password_change/files/gen_password new file mode 100755 index 0000000..febe48a --- /dev/null +++ b/ansible/01_old/roles/password_change/files/gen_password @@ -0,0 +1,45 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypt_flag=True if os.sys.argv[1].lower()=='1' else False +except Exception as err: + encrypt_flag=False + +def generate_password(length=12, num_uppercase=3, num_lowercase=4, num_digits=3, num_sp_char=2): + sp_char = '!@#$' + 
all_chars = string.ascii_letters + string.digits + sp_char + + password = [ + *random.choices(string.ascii_uppercase, k=num_uppercase), + *random.choices(string.ascii_lowercase, k=num_lowercase), + *random.choices(string.digits, k=num_digits), + *random.choices(sp_char, k=num_sp_char) + ] + + remaining_length = length - (num_uppercase + num_lowercase + num_digits + num_sp_char) + password += random.choices(all_chars, k=remaining_length) + + random.shuffle(password) + return ''.join(password) + +def encrypt(plain_text, key): + manual_iv = b'PhilinnovatorDEV' + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + ct_bytes = cipher.encrypt(pad(plain_text.encode(), 16)) + ct = base64.b64encode(ct_bytes).decode('utf-8') + return ct + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +plain_text = generate_password() + +if encrypt_flag: + encrypted_text = encrypt(plain_text, key) + print(encrypted_text) +else: + print(plain_text) diff --git a/ansible/01_old/roles/password_change/files/vault_get b/ansible/01_old/roles/password_change/files/vault_get new file mode 100755 index 0000000..d0fabdb --- /dev/null +++ b/ansible/01_old/roles/password_change/files/vault_get @@ -0,0 +1,17 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import hvac +import os + +hostname=os.sys.argv[1] + +str_url = "http://10.10.43.240:30803" +client = hvac.Client(url=str_url) +client.auth.approle.login(role_id="e96c5fd8-abde-084a-fde7-7450a9348a70", secret_id="5371706b-414a-11d3-f3fd-6cf98871aad1") + +try: + data = client.secrets.kv.v2.read_secret_version(mount_point='host', path=hostname, raise_on_deleted_version=True)['data']['data'] + print(data) +except Exception as err: + print(err) diff --git a/ansible/01_old/roles/password_change/files/vault_put b/ansible/01_old/roles/password_change/files/vault_put new file mode 100755 index 0000000..bf87c25 --- /dev/null +++ b/ansible/01_old/roles/password_change/files/vault_put @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import hvac +import 
os + +hostname=os.sys.argv[1] +username=os.sys.argv[2] +user_pass=os.sys.argv[3] +adminuser=os.sys.argv[4] +adminpass=os.sys.argv[5] + +str_url = "http://10.10.43.240:30803" +client = hvac.Client(url=str_url) +client.auth.approle.login(role_id="e96c5fd8-abde-084a-fde7-7450a9348a70", secret_id="5371706b-414a-11d3-f3fd-6cf98871aad1") + +client.secrets.kv.v2.create_or_update_secret( + mount_point='host', + path=hostname, + secret=dict(username=f'{username}',user_pass=f'{user_pass}',adminuser=f'{adminuser}',adminpass=f'{adminpass}') +) diff --git a/ansible/01_old/roles/password_change/handlers/main.yml b/ansible/01_old/roles/password_change/handlers/main.yml new file mode 100644 index 0000000..b44722c --- /dev/null +++ b/ansible/01_old/roles/password_change/handlers/main.yml @@ -0,0 +1,16 @@ +--- +- name: Reload systemd configuration + ansible.builtin.systemd: + daemon_reload: True + +- name: Restart teleport service + ansible.builtin.systemd: + name: teleport + enabled: true + state: restarted + +- name: restart sshd + service: + name: sshd + state: restarted + enabled: true \ No newline at end of file diff --git a/ansible/01_old/roles/password_change/meta/main.yml b/ansible/01_old/roles/password_change/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/password_change/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. 
+ # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/01_old/roles/password_change/tasks/01_get_password.yml b/ansible/01_old/roles/password_change/tasks/01_get_password.yml new file mode 100644 index 0000000..1a30006 --- /dev/null +++ b/ansible/01_old/roles/password_change/tasks/01_get_password.yml @@ -0,0 +1,41 @@ +--- +- name: get password + command: "{{ role_path }}/files/gen_password {{ encrypt }}" + register: user_password + delegate_to: 127.0.0.1 + when: manual_password is not defined + +- name: get admin password + command: "{{ role_path }}/files/gen_password {{ encrypt }}" + register: admin_password + delegate_to: 127.0.0.1 + when: manual_password is not defined + +- name: set fact user password + block: + - set_fact: + user_password: "{{ user_password.stdout }}" + rescue: + - set_fact: + user_password: "{{ manual_password }}" + always: + - debug: + msg: "{{ username }} : {{ user_password }}" + when: debug_mode == True + +- name: set fact admin password + block: + - set_fact: + admin_password: "{{ admin_password.stdout }}" + 
rescue: + - set_fact: + admin_password: "{{ manual_password }}" + always: + - debug: + msg: "{{ adminuser }} : {{ admin_password }}" + when: debug_mode == True + +- debug: + msg: "({user_password}}" + msg: "({admin_password}}" + diff --git a/ansible/01_old/roles/password_change/tasks/02_change_password.yml b/ansible/01_old/roles/password_change/tasks/02_change_password.yml new file mode 100644 index 0000000..c722da8 --- /dev/null +++ b/ansible/01_old/roles/password_change/tasks/02_change_password.yml @@ -0,0 +1,24 @@ +--- +- include_tasks: 99_decrypt_password.yml + when: + - encrypt == 1 + - manual_password is not defined + +- debug: + var: user_password + +- name: user password change + user: + name: "{{ item }}" + password: "{{ user_password | password_hash('sha512') }}" + state: present + with_items: + - "{{ username }}" + +- name: admin password change + user: + name: "{{ item }}" + password: "{{ admin_password | password_hash('sha512') }}" + state: present + with_items: + - "{{ adminuser }}" diff --git a/ansible/01_old/roles/password_change/tasks/03_vault.yml b/ansible/01_old/roles/password_change/tasks/03_vault.yml new file mode 100644 index 0000000..1f3aa95 --- /dev/null +++ b/ansible/01_old/roles/password_change/tasks/03_vault.yml @@ -0,0 +1,21 @@ +--- +- name: Check if ansible_port is defined + set_fact: + ansible_port: "{{ ansible_port | default(22) }}" + +- debug: + msg: "{{ ansible_distribution }} {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ ansible_port }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + when: debug_mode == True + +- name: put vault + command: "{{ role_path }}/files/vault_put {{ ansible_default_ipv4.address }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + delegate_to: 127.0.0.1 + +- name: get vault + command: "{{ role_path }}/files/vault_get {{ ansible_default_ipv4.address }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + register: 
get_vault + delegate_to: 127.0.0.1 + +- debug: + msg: "{{get_vault.stdout_lines}}" + when: debug_mode == True diff --git a/ansible/01_old/roles/password_change/tasks/04_excel_export.yml b/ansible/01_old/roles/password_change/tasks/04_excel_export.yml new file mode 100644 index 0000000..94a2d3f --- /dev/null +++ b/ansible/01_old/roles/password_change/tasks/04_excel_export.yml @@ -0,0 +1,54 @@ +--- +- name: Redirect output to local file + delegate_to: localhost + copy: + content: "[{{ ansible_date_time.date }} {{ ansible_date_time.hour }}:{{ ansible_date_time.minute }}:{{ ansible_date_time.second }}]" + dest: "/tmp/host_list" + mode: '0666' + backup: yes + +- name: Append output to local file + delegate_to: localhost + lineinfile: + path: "/tmp/host_list" + line: "{{ ansible_distribution }} {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ sshmainport }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + create: yes + +- name: Create a directory if it does not exist + delegate_to: localhost + ansible.builtin.file: + path: /mnt/nas + state: directory + mode: '0755' + + #- name: Mount NFS volumes with noauto according to boot option + # delegate_to: localhost + # ansible.posix.mount: + # src: 10.10.43.42:/volume1/platform/02_비밀번호관리대장 + # path: /mnt/nas + # opts: rw,sync,hard + # boot: false + # state: mounted + # fstype: nfs + + +- name: excel export + command: "{{ role_path }}/files/custom_excel" + delegate_to: 127.0.0.1 + register: excel_name + +- name: debug excel output + debug: + var: excel_name.stdout + +- name: excel copy + copy: + src: "{{ excel_name.stdout }}" + dest: /mnt/nas + delegate_to: 10.10.43.43 + vars: + ansible_ssh_user: dev2-iac + # ansible_ssh_pass: Saasadmin1234! + # ansible_become_pass: Saasadmin1234! 
+ ansible_port: 2222 + # ansible_ssh_common_args: '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' diff --git a/ansible/01_old/roles/password_change/tasks/99_decrypt_password.yml b/ansible/01_old/roles/password_change/tasks/99_decrypt_password.yml new file mode 100644 index 0000000..164cecc --- /dev/null +++ b/ansible/01_old/roles/password_change/tasks/99_decrypt_password.yml @@ -0,0 +1,27 @@ +--- +- name: user_password decrypt + command: "{{ role_path }}/files/decrypt_password {{ user_password }}" + register: user_password + delegate_to: 127.0.0.1 + +- name: admin_password decrypt + command: "{{ role_path }}/files/decrypt_password {{ admin_password }}" + register: admin_password + delegate_to: 127.0.0.1 + when: + - encrypt == 1 + - manual_password is not defined + +- name: admin_password re fact + set_fact: + admin_password: "{{ admin_password.stdout }}" + when: + - encrypt == 1 + - manual_password is not defined + +- name: user_password re fact + set_fact: + user_password: "{{ user_password.stdout }}" + when: + - encrypt == 1 + - manual_password is not defined diff --git a/ansible/01_old/roles/password_change/tasks/main.yml b/ansible/01_old/roles/password_change/tasks/main.yml new file mode 100644 index 0000000..f634141 --- /dev/null +++ b/ansible/01_old/roles/password_change/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- include: 01_get_password.yml + tags: password + +- include: 02_change_password.yml + tags: change + +- include: 03_vault.yml + tags: vault + +- include: 04_excel_export.yml + tags: excel diff --git a/ansible/01_old/roles/password_change/templates/allow_users.j2 b/ansible/01_old/roles/password_change/templates/allow_users.j2 new file mode 100755 index 0000000..67c88da --- /dev/null +++ b/ansible/01_old/roles/password_change/templates/allow_users.j2 @@ -0,0 +1,22 @@ +AllowUsers dev2-iac@10.10.43.* +AllowUsers *@10.20.142.* +{% if ansible_distribution == "Ubuntu" %} +AllowUsers ubuntu@10.10.43.* +{% endif %} +{% if ansible_distribution == 
"CentOS" %} +AllowUsers centos@10.10.43.* +{% endif %} +{% if ansible_distribution == "RedHat" %} +AllowUsers redhat@10.10.43.* +{% endif %} + +{% if admin_users is defined %} +{% for user in admin_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} +{% if allow_users is defined %} +{% for user in allow_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible/01_old/roles/password_change/tests/inventory b/ansible/01_old/roles/password_change/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/01_old/roles/password_change/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/01_old/roles/password_change/tests/test.yml b/ansible/01_old/roles/password_change/tests/test.yml new file mode 100644 index 0000000..c604954 --- /dev/null +++ b/ansible/01_old/roles/password_change/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - password diff --git a/ansible/01_old/roles/password_change/vars/main.yml b/ansible/01_old/roles/password_change/vars/main.yml new file mode 100644 index 0000000..1392b01 --- /dev/null +++ b/ansible/01_old/roles/password_change/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for password diff --git a/ansible/01_old/roles/security-settings/.DS_Store b/ansible/01_old/roles/security-settings/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..8bec8722c381ca317bf822109fde6f2fef5be2e9 GIT binary patch literal 6148 zcmeHK%}T>S5Z>*NO({YS3VK`cTClAJEnY&bFJMFuDm5WRLu0lysXdfJ?)pN$h|lB9 z?gm;69!2a7?0&QJvzz%K`@3s4^wN~G0SPiRXJ$TP@;bnd{PhEd@hofsL=V=Ey!=E_c+eMH2(Wz)uX|{ve>0lQo&M}y4)ai_?m0=#Ua`AZKYIU#+70$S$k$Pf)7}#W>s)sh7|CjK~ zls@t|Q)omC5Ci{=0bUz=Ll=rNXY04}@T?Wk9-*ONT#gC|=o^;+7~no~pq$z-P=`3j XV6G8oLAy!^q>F$eggRp27Z~^g0k}&- literal 0 HcmV?d00001 diff --git a/ansible/01_old/roles/security-settings/defaults/main.yml 
b/ansible/01_old/roles/security-settings/defaults/main.yml new file mode 100755 index 0000000..4d29a9e --- /dev/null +++ b/ansible/01_old/roles/security-settings/defaults/main.yml @@ -0,0 +1,46 @@ +# Password aging settings +os_auth_pw_max_age: 90 +os_auth_pw_min_age: 1 +os_auth_pw_warn_age: 7 +passhistory: 2 + +# Inactivity and Failed attempts lockout settings +fail_deny: 5 +fail_unlock: 0 +inactive_lock: 0 +shell_timeout: 300 + +# tally settings +onerr: 'fail' +deny: 5 +unlock_time: 300 + +# Password complexity settings +pwquality_minlen: 9 +pwquality_maxrepeat: 3 +pwquality_lcredit: -1 +pwquality_ucredit: -1 +pwquality_dcredit: -1 +pwquality_ocredit: -1 + +# SSH settings +sshrootlogin: 'yes' +sshmainport: 22 +ssh_service_name: sshd + +# Crictl setup +crictl_app: crictl +crictl_version: 1.25.0 +crictl_os: linux +crictl_arch: amd64 +crictl_dl_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/{{ crictl_app }}-v{{ crictl_version }}-{{ crictl_os }}-{{ crictl_arch }}.tar.gz +crictl_bin_path: /usr/local/bin +crictl_file_owner: root +crictl_file_group: root + +# temp +username: root +password: saasadmin1234 + +# common user flag +common_user: False diff --git a/ansible/01_old/roles/security-settings/files/login_banner b/ansible/01_old/roles/security-settings/files/login_banner new file mode 100755 index 0000000..d294eeb --- /dev/null +++ b/ansible/01_old/roles/security-settings/files/login_banner @@ -0,0 +1,20 @@ +#!/bin/sh +printf ''' + |-----------------------------------------------------------------| + | This system is for the use of authorized users only. | + | Individuals using this computer system without authority, or in | + | excess of their authority, are subject to having all of their | + | activities on this system monitored and recorded by system | + | personnel. 
| + | | + | In the course of monitoring individuals improperly using this | + | system, or in the course of system maintenance, the activities | + | of authorized users may also be monitored. | + | | + | Anyone using this system expressly consents to such monitoring | + | and is advised that if such monitoring reveals possible | + | evidence of criminal activity, system personnel may provide the | + | evidence of such monitoring to law enforcement officials. | + |-----------------------------------------------------------------| +''' + diff --git a/ansible/01_old/roles/security-settings/handlers/main.yml b/ansible/01_old/roles/security-settings/handlers/main.yml new file mode 100755 index 0000000..abab7ef --- /dev/null +++ b/ansible/01_old/roles/security-settings/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart sshd + service: + name: "{{ ssh_service_name }}" + state: restarted + enabled: true diff --git a/ansible/01_old/roles/security-settings/tasks/admin_set.yml b/ansible/01_old/roles/security-settings/tasks/admin_set.yml new file mode 100755 index 0000000..3836c16 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/admin_set.yml @@ -0,0 +1,7 @@ +--- +- name: user change + user: + name: "{{ username }}" + password: "{{ password | password_hash('sha512') }}" + state: present + diff --git a/ansible/01_old/roles/security-settings/tasks/banner.yml b/ansible/01_old/roles/security-settings/tasks/banner.yml new file mode 100755 index 0000000..6a172c9 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/banner.yml @@ -0,0 +1,29 @@ +--- +- name: Create a tar.gz archive of a single file. 
+ archive: + path: /etc/update-motd.d/* + dest: /etc/update-motd.d/motd.tar.gz + format: gz + force_archive: true + +- name: remove a motd.d files + file: + path: /etc/update-motd.d/{{ item }} + state: absent + with_items: + - 10-help-text + - 85-fwupd + - 90-updates-available + - 91-release-upgrade + - 95-hwe-eol + - 98-fsck-at-reboot + - 50-motd-news + - 88-esm-announce + +- name: Create login banner + copy: + src: login_banner + dest: /etc/update-motd.d/00-header + owner: root + group: root + mode: 0755 diff --git a/ansible/01_old/roles/security-settings/tasks/crictl.yml b/ansible/01_old/roles/security-settings/tasks/crictl.yml new file mode 100755 index 0000000..125a878 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/crictl.yml @@ -0,0 +1,19 @@ +--- +- name: Downloading and extracting {{ crictl_app }} {{ crictl_version }} + unarchive: + src: "{{ crictl_dl_url }}" + dest: "{{ crictl_bin_path }}" + owner: "{{ crictl_file_owner }}" + group: "{{ crictl_file_group }}" + extra_opts: + - crictl + remote_src: yes + +- name: Crictl command crontab setting + ansible.builtin.cron: + name: crontab command + minute: "0" + hour: "3" + user: root + job: "/usr/local/bin/crictl rmi --prune" + diff --git a/ansible/01_old/roles/security-settings/tasks/login_defs.yml b/ansible/01_old/roles/security-settings/tasks/login_defs.yml new file mode 100755 index 0000000..f25702a --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/login_defs.yml @@ -0,0 +1,48 @@ +--- +- name: Set pass max days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MAX_DAYS.*$' + line: "PASS_MAX_DAYS\t{{os_auth_pw_max_age}}" + backrefs: yes + +- name: Set pass min days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MIN_DAYS.*$' + line: "PASS_MIN_DAYS\t{{os_auth_pw_min_age}}" + backrefs: yes + +- name: Set pass min length + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_MIN_LEN.*$' + line: 
"PASS_MIN_LEN\t{{pwquality_minlen}}" + backrefs: yes + +- name: Set pass warn days + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^PASS_WARN_AGE.*$' + line: "PASS_WARN_AGE\t{{os_auth_pw_warn_age}}" + backrefs: yes + +- name: Set password encryption to SHA512 + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^ENCRYPT_METHOD\s.*$' + line: "ENCRYPT_METHOD\tSHA512" + backrefs: yes + +- name: Disable MD5 crypt explicitly + lineinfile: + dest: /etc/login.defs + state: present + regexp: '^MD5_CRYPT_ENAB.*$' + line: "MD5_CRYPT_ENAB NO" + backrefs: yes diff --git a/ansible/01_old/roles/security-settings/tasks/main.yml b/ansible/01_old/roles/security-settings/tasks/main.yml new file mode 100755 index 0000000..c9a3fe6 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- include: login_defs.yml + tags: login_defs + +- include: pam.yml + tags: pam + +- include: sshd_config.yml + tags: sshd_config + +- include: sudoers.yml + tags: sudoers + +- include: profile.yml + tags: profile + +- include: banner.yml + tags: banner + +- include: crictl.yml + tags: crictl + +- include: admin_set.yml + tags: admin_set diff --git a/ansible/01_old/roles/security-settings/tasks/pam.yml b/ansible/01_old/roles/security-settings/tasks/pam.yml new file mode 100755 index 0000000..ae1c637 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/pam.yml @@ -0,0 +1,50 @@ +--- +- name: Add pam_tally2.so + template: + src: common-auth.j2 + dest: /etc/pam.d/common-auth + owner: root + group: root + mode: 0644 + +- name: Create pwquality.conf password complexity configuration + block: + - apt: + name: libpam-pwquality + state: present + install_recommends: false + - template: + src: pwquality.conf.j2 + dest: /etc/security/pwquality.conf + owner: root + group: root + mode: 0644 + +- name: Add pam_tally2.so + block: + - lineinfile: + dest: /etc/pam.d/common-account + regexp: '^account\srequisite' + line: "account requisite 
pam_deny.so" + + - lineinfile: + dest: /etc/pam.d/common-account + regexp: '^account\srequired' + line: "account required pam_tally2.so" + +- name: password reuse is limited + lineinfile: + dest: /etc/pam.d/common-password + line: "password required pam_pwhistory.so remember=5" + +- name: password hashing algorithm is SHA-512 + lineinfile: + dest: /etc/pam.d/common-password + regexp: '^password\s+\[success' + line: "password [success=1 default=ignore] pam_unix.so sha512" + +- name: Shadow Password Suite Parameters + lineinfile: + dest: /etc/pam.d/common-password + regexp: '^password\s+\[success' + line: "password [success=1 default=ignore] pam_unix.so sha512" diff --git a/ansible/01_old/roles/security-settings/tasks/profile.yml b/ansible/01_old/roles/security-settings/tasks/profile.yml new file mode 100755 index 0000000..fb1b456 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/profile.yml @@ -0,0 +1,24 @@ +--- +- name: Set session timeout + lineinfile: + dest: /etc/profile + regexp: '^TMOUT=.*' + insertbefore: '^readonly TMOUT' + line: 'TMOUT={{shell_timeout}}' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" + +- name: Set TMOUT readonly + lineinfile: + dest: /etc/profile + regexp: '^readonly TMOUT' + insertafter: 'TMOUT={{shell_timeout}}' + line: 'readonly TMOUT' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" + +- name: Set export TMOUT + lineinfile: + dest: /etc/profile + regexp: '^export TMOUT.*' + insertafter: 'readonly TMOUT' + line: 'export TMOUT' + state: "{{ 'absent' if (shell_timeout == 0) else 'present' }}" diff --git a/ansible/01_old/roles/security-settings/tasks/sshd_config.yml b/ansible/01_old/roles/security-settings/tasks/sshd_config.yml new file mode 100755 index 0000000..6b9f7a3 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/sshd_config.yml @@ -0,0 +1,30 @@ +--- +- name: Configure ssh root login to {{sshrootlogin}} + lineinfile: + dest: /etc/ssh/sshd_config + regexp: 
'^(#)?PermitRootLogin.*' + line: 'PermitRootLogin {{sshrootlogin}}' + insertbefore: '^Match.*' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: SSH Listen on Main Port + lineinfile: + dest: /etc/ssh/sshd_config + insertbefore: '^#*AddressFamily' + line: 'Port {{sshmainport}}' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: "Setting sshd allow users" + template: + src: allow_users.j2 + dest: "/etc/ssh/sshd_config.d/allow_users.conf" + notify: restart sshd + diff --git a/ansible/01_old/roles/security-settings/tasks/sudoers.yml b/ansible/01_old/roles/security-settings/tasks/sudoers.yml new file mode 100755 index 0000000..4be28c0 --- /dev/null +++ b/ansible/01_old/roles/security-settings/tasks/sudoers.yml @@ -0,0 +1,94 @@ +--- +- name: "Create devops group" + ansible.builtin.group: + name: "devops" + state: present + +- name: "get current users" + shell: "cat /etc/passwd | egrep -iv '(false|nologin|sync|root|dev2-iac)' | awk -F: '{print $1}'" + register: deleting_users + +- name: "Delete users" + ansible.builtin.user: + name: "{{ item }}" + state: absent + remove: yes + with_items: "{{ deleting_users.stdout_lines }}" + when: item != ansible_user + ignore_errors: true + +- name: "Create admin user" + ansible.builtin.user: + name: "{{ item.name }}" + group: "devops" + shell: "/bin/bash" + system: yes + state: present + with_items: "{{ admin_users }}" + when: + - item.name is defined + ignore_errors: true + +- name: "admin user password change" + user: + name: "{{ item.name }}" + password: "{{ password | password_hash('sha512') }}" + state: present + with_items: "{{ admin_users }}" + when: + - item.name is defined + ignore_errors: true + +- name: "Add admin user key" + authorized_key: + user: "{{ item.name }}" + state: present + key: "{{ item.key }}" + with_items: "{{ admin_users }}" + when: + - item.name is defined + - item.key is defined + - common_user is defined + ignore_errors: 
true + +- name: "Create common user" + ansible.builtin.user: + name: "{{ item.name }}" + group: "users" + shell: "/bin/bash" + system: yes + state: present + with_items: "{{ allow_users }}" + when: + - item.name is defined + - common_user is defined + ignore_errors: true + +- name: "Change common user password change" + user: + name: "{{ item.name }}" + password: "{{ password | password_hash('sha512') }}" + state: present + with_items: "{{ allow_users }}" + when: + - item.name is defined + - common_user is defined + ignore_errors: true + +- name: "Add common user key" + authorized_key: + user: "{{ item.name }}" + state: present + key: "{{ item.key }}" + with_items: "{{ allow_users }}" + when: + - item.name is defined + - item.key is defined + - common_user is defined + ignore_errors: true + +- name: "Setting sudoers allow users" + template: + src: sudoers_users.j2 + dest: "/etc/sudoers.d/sudoers_users" + ignore_errors: true diff --git a/ansible/01_old/roles/security-settings/templates/allow_users.j2 b/ansible/01_old/roles/security-settings/templates/allow_users.j2 new file mode 100755 index 0000000..fab55dc --- /dev/null +++ b/ansible/01_old/roles/security-settings/templates/allow_users.j2 @@ -0,0 +1,11 @@ +AllowUsers dev2-iac@10.10.43.* +{% if admin_users is defined %} +{% for user in admin_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} +{% if allow_users is defined %} +{% for user in allow_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} diff --git a/ansible/01_old/roles/security-settings/templates/common-auth.j2 b/ansible/01_old/roles/security-settings/templates/common-auth.j2 new file mode 100755 index 0000000..64a603b --- /dev/null +++ b/ansible/01_old/roles/security-settings/templates/common-auth.j2 @@ -0,0 +1,27 @@ +# +# /etc/pam.d/common-auth - authentication settings common to all services +# +# This file is included from other service-specific PAM config files, +# and should contain a list of the 
authentication modules that define +# the central authentication scheme for use on the system +# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the +# traditional Unix authentication mechanisms. +# +# As of pam 1.0.1-6, this file is managed by pam-auth-update by default. +# To take advantage of this, it is recommended that you configure any +# local modules either before or after the default block, and use +# pam-auth-update to manage selection of other modules. See +# pam-auth-update(8) for details. +auth required pam_tally2.so onerr={{onerr}} even_deny_root deny={{deny}} unlock_time={{unlock_time}} + +# here are the per-package modules (the "Primary" block) +auth [success=1 default=ignore] pam_unix.so nullok +# here's the fallback if no module succeeds +auth requisite pam_deny.so +# prime the stack with a positive return value if there isn't one already; +# this avoids us returning an error just because nothing sets a success code +auth required pam_permit.so +# since the modules above will each just jump around +# and here are more per-package modules (the "Additional" block) +auth optional pam_cap.so +# end of pam-auth-update config diff --git a/ansible/01_old/roles/security-settings/templates/pwquality.conf.j2 b/ansible/01_old/roles/security-settings/templates/pwquality.conf.j2 new file mode 100755 index 0000000..3ec2cbe --- /dev/null +++ b/ansible/01_old/roles/security-settings/templates/pwquality.conf.j2 @@ -0,0 +1,50 @@ +# Configuration for systemwide password quality limits +# Defaults: +# +# Number of characters in the new password that must not be present in the +# old password. +# difok = 5 +# +# Minimum acceptable size for the new password (plus one if +# credits are not disabled which is the default). (See pam_cracklib manual.) +# Cannot be set to lower value than 6. +minlen = {{pwquality_minlen}} +# +# The maximum credit for having digits in the new password. If less than 0 +# it is the minimum number of digits in the new password. 
+dcredit = {{pwquality_dcredit}} +# +# The maximum credit for having uppercase characters in the new password. +# If less than 0 it is the minimum number of uppercase characters in the new +# password. +ucredit = {{pwquality_ucredit}} +# +# The maximum credit for having lowercase characters in the new password. +# If less than 0 it is the minimum number of lowercase characters in the new +# password. +lcredit = {{pwquality_lcredit}} +# +# The maximum credit for having other characters in the new password. +# If less than 0 it is the minimum number of other characters in the new +# password. +ocredit = {{pwquality_ocredit}} +# +# The minimum number of required classes of characters for the new +# password (digits, uppercase, lowercase, others). +# minclass = 0 +# +# The maximum number of allowed consecutive same characters in the new password. +# The check is disabled if the value is 0. +maxrepeat = {{pwquality_maxrepeat}} +# +# The maximum number of allowed consecutive characters of the same class in the +# new password. +# The check is disabled if the value is 0. +# maxclassrepeat = 0 +# +# Whether to check for the words from the passwd entry GECOS string of the user. +# The check is enabled if the value is not 0. +# gecoscheck = 0 +# +# Path to the cracklib dictionaries. Default is to use the cracklib default. 
+# dictpath = diff --git a/ansible/01_old/roles/security-settings/templates/sudoers_users.j2 b/ansible/01_old/roles/security-settings/templates/sudoers_users.j2 new file mode 100755 index 0000000..4c30d95 --- /dev/null +++ b/ansible/01_old/roles/security-settings/templates/sudoers_users.j2 @@ -0,0 +1,6 @@ +dev2-iac ALL=(ALL) NOPASSWD: ALL +{% if allow_users is defined %} +{% for user in admin_users %} +{{ user.name }} ALL=(ALL) NOPASSWD: ALL +{% endfor %} +{% endif %} diff --git a/ansible/01_old/roles/teleport/.DS_Store b/ansible/01_old/roles/teleport/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1f3ccf439cf8f0103a453c9075343c852d785a98 GIT binary patch literal 6148 zcmeHK%}T>S5Z>*NO({YT3gT(OYr+0VwRj1!zJL)usMLfM4UO5-q~=fxc>sMOAH?Tz zW_JTE7LOu!26n&M`Pt2Uko{qdac3TM7;7@dENF-vl?{UCjjoajM&xRa#LtqcA4z}1 zG=I~C-(F=Y^H{X}kT&OXW&+r)JfxhV|e+%gjx^be=fg><&lQQbvBE zd;U#0&PL6>3mK{QvAq>V4a(^4ffy^8^kApD8UZk&K_fT4WTFgv0+N z2-m5AI+dFz2G{9e7bea&m}}JOjH{Jl9@Z$`^bTEYQI1o Z;%tMtMw|uhDjkq60*Vmoh=E^V;0w8(ON0Ob literal 0 HcmV?d00001 diff --git a/ansible/01_old/roles/teleport/README.md b/ansible/01_old/roles/teleport/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/01_old/roles/teleport/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/01_old/roles/teleport/defaults/main.yml b/ansible/01_old/roles/teleport/defaults/main.yml new file mode 100644 index 0000000..79506a1 --- /dev/null +++ b/ansible/01_old/roles/teleport/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# defaults file for teleport +teleport_uri: teleport.kr.datasaker.io +teleport_version: 13.3.8 +remove: False +update: False +install: False +custom_labels: [] + diff --git a/ansible/01_old/roles/teleport/handlers/main.yml b/ansible/01_old/roles/teleport/handlers/main.yml new file mode 100644 index 0000000..4b32df4 --- /dev/null +++ b/ansible/01_old/roles/teleport/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Reload systemd configuration + ansible.builtin.systemd: + daemon_reload: True + +- name: Restart teleport service + ansible.builtin.systemd: + name: teleport + enabled: true + state: restarted \ No newline at end of file diff --git a/ansible/01_old/roles/teleport/meta/main.yml b/ansible/01_old/roles/teleport/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/01_old/roles/teleport/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: 
http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
diff --git a/ansible/01_old/roles/teleport/tasks/main.yml b/ansible/01_old/roles/teleport/tasks/main.yml new file mode 100644 index 0000000..8fd44f0 --- /dev/null +++ b/ansible/01_old/roles/teleport/tasks/main.yml @@ -0,0 +1,33 @@ +- name: "Create temporary directory for key manipulation" + tempfile: + state: directory + suffix: keys + register: tempdir + when: + - install == True or update == True + - remove == False + +- name: "Include Teleport Agent Install" + include_tasks: teleport_install.yml + tags: install + when: + - install == True + +- name: "Include Teleport Agent update" + include_tasks: teleport_update.yml + tags: remove + when: + - update == True + +- name: "Remove temporary directory for key manipulation" + file: + path: "{{ tempdir.path }}" + state: absent + when: + - install == True or update == True + +- name: "Include Teleport Agent remove" + include_tasks: teleport_remove.yml + tags: remove + when: + - remove == True diff --git a/ansible/01_old/roles/teleport/tasks/teleport_install.yml b/ansible/01_old/roles/teleport/tasks/teleport_install.yml new file mode 100644 index 0000000..0d849ba --- /dev/null +++ b/ansible/01_old/roles/teleport/tasks/teleport_install.yml @@ -0,0 +1,25 @@ +--- +- name: "Run tctl nodes add and capture the output" + command: tctl nodes add + register: tctl_output + changed_when: false + delegate_to: 127.0.0.1 + +- name: "Extract token and ca_pin" + set_fact: + get_join_token: "{{ (tctl_output.stdout | regex_search('--token=(\\S+)', '\\1'))[0] }}" + get_ca_pin: "{{ (tctl_output.stdout | regex_search('--ca-pin=(\\S+)', '\\1'))[0] }}" + +- name: "Debug extracted values" + debug: + msg: + - "join_token: {{ get_join_token }}" + - "ca_pin: {{ get_ca_pin }}" + +- name: "Create Teleport install script" + template: + src: install-node.sh.j2 + dest: "{{ tempdir.path }}/install-node.sh" + +- name: "Run Teleport Install Script" + command: "bash {{ tempdir.path }}/install-node.sh" diff --git 
a/ansible/01_old/roles/teleport/tasks/teleport_remove.yml b/ansible/01_old/roles/teleport/tasks/teleport_remove.yml
new file mode 100644
index 0000000..504e042
--- /dev/null
+++ b/ansible/01_old/roles/teleport/tasks/teleport_remove.yml
@@ -0,0 +1,27 @@
+---
+- name: "Remove Teleport on RedHat-based systems"
+  yum:
+    name: teleport
+    state: absent
+  when: ansible_os_family == "RedHat"
+
+- name: "Remove Teleport on Debian-based systems"
+  apt:
+    name: teleport
+    state: absent
+  when: ansible_os_family == "Debian"
+
+- name: "Remove Teleport directories and files"
+  file:
+    path: "{{ item }}"
+    state: absent
+  loop:
+    - /var/lib/teleport
+    - /etc/teleport.yaml
+    - /usr/local/bin/teleport
+    - /usr/local/bin/tctl
+    - /usr/local/bin/tsh
+
+- name: "Kill Teleport processes"
+  command: pkill -9 teleport
+  ignore_errors: true
diff --git a/ansible/01_old/roles/teleport/tasks/teleport_update.yml b/ansible/01_old/roles/teleport/tasks/teleport_update.yml
new file mode 100644
index 0000000..5efe98f
--- /dev/null
+++ b/ansible/01_old/roles/teleport/tasks/teleport_update.yml
@@ -0,0 +1,47 @@
+---
+
+- name: "Run token the output"
+  shell: "grep 'token_name:' /etc/teleport.yaml | awk '{print $2}'"
+  register: token_output
+  changed_when: false
+  ignore_errors: true
+
+- name: "Run ca_pin the output"
+  shell: "grep 'ca_pin:' /etc/teleport.yaml | awk '{print $2}'"
+  register: ca_output
+  changed_when: false
+  ignore_errors: true
+
+- name: "Extract token and ca_pin"
+  set_fact:
+    get_join_token: "{{ token_output.stdout }}"
+    get_ca_pin: "{{ ca_output.stdout }}"
+
+- name: "Debug extracted values"
+  debug:
+    msg:
+      - "join_token: {{ get_join_token }}"
+      - "ca_pin: {{ get_ca_pin }}"
+
+- name: "Update Teleport yaml"
+  template:
+    src: teleport.yaml.j2
+    dest: "/etc/teleport.yaml"
+
+- name: "Update Teleport on RedHat-based systems"
+  yum:
+    name: teleport
+    state: latest
+  when: ansible_os_family == "RedHat"
+  notify:
+    - Reload systemd configuration
+    - Restart teleport service
+
+- name: "Update Teleport on Debian-based systems"
+  apt:
+    name: teleport
+    state: latest
+  when: ansible_os_family == "Debian"
+  notify:
+    - Reload systemd configuration
+    - Restart teleport service
\ No newline at end of file
diff --git a/ansible/01_old/roles/teleport/templates/install-node.sh.j2 b/ansible/01_old/roles/teleport/templates/install-node.sh.j2
new file mode 100644
index 0000000..1972e5b
--- /dev/null
+++ b/ansible/01_old/roles/teleport/templates/install-node.sh.j2
@@ -0,0 +1,999 @@
+#!/bin/bash
+set -euo pipefail
+SCRIPT_NAME="teleport-installer"
+
+# default values
+ALIVE_CHECK_DELAY=3
+CONNECTIVITY_TEST_METHOD=""
+COPY_COMMAND="cp"
+DISTRO_TYPE=""
+IGNORE_CONNECTIVITY_CHECK="${TELEPORT_IGNORE_CONNECTIVITY_CHECK:-false}"
+LAUNCHD_CONFIG_PATH="/Library/LaunchDaemons"
+LOG_FILENAME="$(mktemp -t ${SCRIPT_NAME}.log.XXXXXXXXXX)"
+MACOS_STDERR_LOG="/var/log/teleport-stderr.log"
+MACOS_STDOUT_LOG="/var/log/teleport-stdout.log"
+SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service"
+TARGET_PORT_DEFAULT=443
+TELEPORT_ARCHIVE_PATH='teleport'
+TELEPORT_BINARY_DIR="/usr/local/bin"
+TELEPORT_BINARY_LIST="teleport tctl tsh"
+TELEPORT_CONFIG_PATH="/etc/teleport.yaml"
+TELEPORT_DATA_DIR="/var/lib/teleport"
+TELEPORT_DOCS_URL="https://goteleport.com/docs/"
+TELEPORT_FORMAT=""
+
+# initialise variables (because set -u disallows unbound variables)
+f=""
+l=""
+DISABLE_TLS_VERIFICATION=false
+NODENAME=$(hostname)
+IGNORE_CHECKS=false
+OVERRIDE_FORMAT=""
+QUIET=false
+APP_INSTALL_DECISION=""
+INTERACTIVE=false
+
+# the default value of each variable is a templatable Go value so that it can
+# optionally be replaced by the server before the script is served up
+TELEPORT_VERSION='{{ teleport_version }}'
+TELEPORT_PACKAGE_NAME='teleport'
+REPO_CHANNEL=''
+TARGET_HOSTNAME='{{ teleport_uri }}'
+TARGET_PORT='443'
+JOIN_TOKEN='{{ get_join_token }}'
+JOIN_METHOD=''
+JOIN_METHOD_FLAG=""
+[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method 
${JOIN_METHOD}"
+
+# inject labels into the configuration
+# LABELS='teleport.internal/resource-id=0ec993a8-b1ec-4fa6-8fc5-4e73e3e5306e','env=localhost'
+LABELS='ipaddr={{ansible_default_ipv4.address}},group={{ group_names[-1] }},os={{ ansible_distribution }}{% if custom_labels %},{{ custom_labels }}{% endif %}'
+# build --labels flag as an array so the comma-separated value survives word splitting
+# (the duplicated second copy of this assignment pair was removed)
+LABELS_FLAG=()
+[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}")
+
+# When all stanza generators have been updated to use the new
+# `teleport configure` commands CA_PIN_HASHES can be removed along
+# with the script passing it in in `join_tokens.go`.
+CA_PIN_HASHES='{{ get_ca_pin }}'
+CA_PINS='{{ get_ca_pin }}'
+ARG_CA_PIN_HASHES=""
+APP_INSTALL_MODE='false'
+APP_NAME=''
+APP_URI=''
+DB_INSTALL_MODE='false'
+
+# usage message
+# shellcheck disable=SC2086
+usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; }
+while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do
+    case "${o}" in
+        v) TELEPORT_VERSION=${OPTARG};;
+        h) TARGET_HOSTNAME=${OPTARG};;
+        p) TARGET_PORT=${OPTARG};;
+        j) JOIN_TOKEN=${OPTARG};;
+        c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";;
+        f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;;
+        q) QUIET=true;;
+        l) l=${OPTARG};;
+        i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";;
+        k) DISABLE_TLS_VERIFICATION=true;;
+        a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};;
+        u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};;
+        *) usage;;
+    esac
+done
+shift $((OPTIND-1))
+
+# -c arguments (if any) override the templated CA pin hashes
+if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then
+    CA_PIN_HASHES="${ARG_CA_PIN_HASHES}"
+fi
+
+# function to construct a go template variable
+# go's template parser is a bit finicky, so we dynamically build the value one character at a time
+construct_go_template() {
+    OUTPUT="{"
+    OUTPUT+="{"
+    OUTPUT+="."
+ OUTPUT+="${1}" + OUTPUT+="}" + OUTPUT+="}" + echo "${OUTPUT}" +} + +# check whether we are root, exit if not +assert_running_as_root() { + if ! [ "$(id -u)" = 0 ]; then + echo "This script must be run as root." 1>&2 + exit 1 + fi +} + +# function to check whether variables are either blank or set to the default go template value +# (because they haven't been set by the go script generator or a command line argument) +# returns 1 if the variable is set to a default/zero value +# returns 0 otherwise (i.e. it needs to be set interactively) +check_variable() { + VARIABLE_VALUE="${!1}" + GO_TEMPLATE_NAME=$(construct_go_template "${2}") + if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then + return 1 + fi + return 0 +} + +# function to check whether a provided value is "truthy" i.e. it looks like you're trying to say "yes" +is_truthy() { + declare -a TRUTHY_VALUES + TRUTHY_VALUES=("y" "Y" "yes" "YES" "ye" "YE" "yep" "YEP" "ya" "YA") + CHECK_VALUE="$1" + for ARRAY_VALUE in "${TRUTHY_VALUES[@]}"; do [[ "${CHECK_VALUE}" == "${ARRAY_VALUE}" ]] && return 0; done + return 1 +} + +# function to read input until the value you get is non-empty +read_nonblank_input() { + INPUT="" + VARIABLE_TO_ASSIGN="$1" + shift + PROMPT="$*" + until [[ "${INPUT}" != "" ]]; do + echo -n "${PROMPT}" + read -r INPUT + done + printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}" +} + +# error if we're not root +assert_running_as_root + +# set/read values interactively if not provided +# users will be prompted to enter their own value if all the following are true: +# - the current value is blank, or equal to the default Go template value +# - the value has not been provided by command line argument +! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): " +! 
check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: " +! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; } +! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: " +! check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): " +[ -n "${f}" ] && OVERRIDE_FORMAT=${f} +[ -n "${l}" ] && LOG_FILENAME=${l} +# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already), +# prompt the user to choose whether to enable app_service +if [[ "${INTERACTIVE}" == "true" ]]; then + if ! check_variable APP_INSTALL_MODE appInstallMode; then + APP_INSTALL_MODE="false" + echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] " + read -r APP_INSTALL_DECISION + if is_truthy "${APP_INSTALL_DECISION}"; then + APP_INSTALL_MODE="true" + fi + fi +fi +# prompt for extra needed values if we're running in app service mode +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + ! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): " + ! 
check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): " + # generate app public addr by concatenating values + APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}" +fi + +# set default target port if value not provided +if [[ "${TARGET_PORT}" == "" ]]; then + TARGET_PORT=${TARGET_PORT_DEFAULT} +fi + +# clear log file if provided +if [[ "${LOG_FILENAME}" != "" ]]; then + if [ -f "${LOG_FILENAME}" ]; then + echo -n "" > "${LOG_FILENAME}" + fi +fi + +# log functions +log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; } +log() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*" + if [[ ${QUIET} != "true" ]]; then + echo "${LOG_LINE}" + fi + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line with no timestamp or starting data, always prints +log_only() { + LOG_LINE="$*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line by itself as a header +log_header() { + LOG_LINE="$*" + echo "" + echo "${LOG_LINE}" + echo "" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# important log lines, print even when -q (quiet) is passed +log_important() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +log_cleanup_message() { + log_only "This script does not overwrite any existing settings or Teleport installations." 
+ log_only "Please clean up by running any of the following steps as necessary:" + log_only "- stop any running Teleport processes" + log_only " - pkill -f teleport" + log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself" + log_only " - rm -rf ${TELEPORT_DATA_DIR}" + log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}" + log_only " - rm -f ${TELEPORT_CONFIG_PATH}" + log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}" + for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done + log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}" + log_only "Run this installer again when done." + log_only +} + +# other functions +# check whether a named program exists +check_exists() { NAME=$1; if type "${NAME}" >/dev/null 2>&1; then return 0; else return 1; fi; } +# checks for the existence of a list of named binaries and exits with error if any of them don't exist +check_exists_fatal() { + for TOOL in "$@"; do + if ! 
check_exists "${TOOL}"; then + log_important "Error: cannot find ${TOOL} - it needs to be installed" + exit 1 + fi + done +} +# check connectivity to the given host/port and make a request to see if Teleport is listening +# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return +# values aren't really a thing that exists in bash +check_connectivity() { + HOST=$1 + PORT=$2 + # check with nc + if check_exists nc; then + CONNECTIVITY_TEST_METHOD="nc" + if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc, check with telnet + elif check_exists telnet; then + CONNECTIVITY_TEST_METHOD="telnet" + if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc or telnet, try and use /dev/tcp + elif [ -f /dev/tcp ]; then + CONNECTIVITY_TEST_METHOD="/dev/tcp" + if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi + else + return 255 + fi +} +# check whether a teleport DEB is already installed and exit with error if so +check_deb_not_already_installed() { + check_exists_fatal dpkg awk + DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true) + if [[ ${DEB_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})." + log_important "You will need to remove that package before using this script." + exit 1 + fi +} +# check whether a teleport RPM is already installed and exit with error if so +check_rpm_not_already_installed() { + check_exists_fatal rpm + RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true) + if [[ ${RPM_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})." + log_important "You will need to remove that package before using this script." 
+ exit 1 + fi +} +# function to check if given variable is set +check_set() { + CHECK_KEY=${1} || true + CHECK_VALUE=${!1} || true + if [[ "${CHECK_VALUE}" == "" ]]; then + log "Required variable ${CHECK_KEY} is not set" + exit 1 + else + log "${CHECK_KEY}: ${CHECK_VALUE}" + fi +} +# checks that teleport binary can be found in path and runs 'teleport version' +check_teleport_binary() { + FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version) + if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then + log "Cannot find Teleport binary" + return 1 + else + log "Found: ${FOUND_TELEPORT_VERSION}"; + return 0 + fi +} +# wrapper to download with curl +download() { + URL=$1 + OUTPUT_PATH=$2 + CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5" + # optionally allow disabling of TLS verification (can be useful on older distros + # which often have an out-of-date set of CA certificate bundle which won't validate) + if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then + CURL_COMMAND+=" -k" + fi + log "Running ${CURL_COMMAND} ${URL}" + log "Downloading to ${OUTPUT_PATH}" + # handle errors with curl + if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then + log_important "curl error downloading ${URL}" + log "On an older OS, this may be related to the CA certificate bundle being too old." + log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!" + exit 1 + fi + # check that the file has a non-zero size as an extra validation + check_exists_fatal wc xargs + FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)" + if [ "${FILE_SIZE}" -eq 0 ]; then + log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue." 
+ exit 1 + else + log "Downloaded file size: ${FILE_SIZE} bytes" + fi + # if we have a hashing utility installed, also download and validate the checksum + SHA_COMMAND="" + # shasum is installed by default on MacOS and some distros + if check_exists shasum; then + SHA_COMMAND="shasum -a 256" + # sha256sum is installed by default in some other distros + elif check_exists sha256sum; then + SHA_COMMAND="sha256sum" + fi + if [[ "${SHA_COMMAND}" != "" ]]; then + log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file" + SHA_URL="${URL}.sha256" + SHA_PATH="${OUTPUT_PATH}.sha256" + ${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}" + if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then + log "The downloaded file's checksum validated correctly" + else + SHA_EXPECTED=$(cat "${SHA_PATH}") + SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}") + if check_exists awk; then + SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}') + SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}') + fi + log_important "Checksum of the downloaded file did not validate correctly" + log_important "Expected: ${SHA_EXPECTED}" + log_important "Got: ${SHA_ACTUAL}" + log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support." 
+ exit 1 + fi + else + log "shasum/sha256sum utilities not found, will skip checksum validation" + fi +} +# gets the filename from a full path (https://target.site/path/to/file.tar.gz -> file.tar.gz) +get_download_filename() { echo "${1##*/}"; } +# gets the pid of any running teleport process (and converts newlines to spaces) +get_teleport_pid() { + check_exists_fatal pgrep xargs + pgrep teleport | xargs echo +} +# returns a command which will start teleport using the config +get_teleport_start_command() { + echo "${TELEPORT_BINARY_DIR}/teleport start --config=${TELEPORT_CONFIG_PATH}" +} +# installs the teleport-provided launchd config +install_launchd_config() { + log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist +} +# installs the teleport-provided systemd unit +install_systemd_unit() { + log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH} + log "Reloading unit files (systemctl daemon-reload)" + systemctl daemon-reload +} +# formats the arguments as a yaml list +get_yaml_list() { + name="${1}" + list="${2}" + indentation="${3}" + echo "${indentation}${name}:" + for item in ${list}; do + echo "${indentation}- ${item}" + done +} + +# installs the provided teleport config (for app service) +install_teleport_app_config() { + log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +app_service: + enabled: yes + apps: 
+ - name: "${APP_NAME}" + uri: "${APP_URI}" + public_addr: ${APP_PUBLIC_ADDR} +EOF +} +# installs the provided teleport config (for database service) +install_teleport_database_config() { + log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + + # This file is processed by `shellschek` as part of the lint step + # It detects an issue because of un-set variables - $index and $line. This check is called SC2154. + # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it. + # When executing the script, those are no long variables but actual values. + # shellcheck disable=SC2154 + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +db_service: + enabled: "yes" + resources: + - labels: +EOF +} +# installs the provided teleport config (for node service) +install_teleport_node_config() { + log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}" + ${TELEPORT_BINARY_DIR}/teleport node configure \ + --token ${JOIN_TOKEN} \ + ${JOIN_METHOD_FLAG} \ + --ca-pin ${CA_PINS} \ + --proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \ + "${LABELS_FLAG[@]}" \ + --output ${TELEPORT_CONFIG_PATH} +} +# checks whether the given host is running MacOS +is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi } +# checks whether teleport is already running on the host +is_running_teleport() { + check_exists_fatal pgrep + TELEPORT_PID=$(get_teleport_pid) + if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi +} +# checks whether the given host is running systemd as its init system +is_using_systemd() { if [ -d /run/systemd/system ]; then return 0; else 
return 1; fi } +# prints a warning if the host isn't running systemd +no_systemd_warning() { + log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits." + log_important "Please investigate an alternative way to keep Teleport running." + log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}" + log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit." + log_only + log_only "Run this command to start Teleport in future:" + log_only "$(get_teleport_start_command)" + log_only + log_only "------------------------------------------------------------------------" + log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |" + log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |" + log_only "------------------------------------------------------------------------" + log_only +} +# print a message giving the name of the node and a link to the docs +# gives some debugging instructions if the service didn't start successfully +print_welcome_message() { + log_only "" + if is_running_teleport; then + log_only "Teleport has been started." 
+ log_only "" + if is_using_systemd; then + log_only "View its status with 'sudo systemctl status teleport.service'" + log_only "View Teleport logs using 'sudo journalctl -u teleport.service'" + log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'" + log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'" + elif is_macos_host; then + log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + fi + log_only "" + log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'" + log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/" + else + log_important "The Teleport service was installed, but it does not appear to have started successfully." + if is_using_systemd; then + log_important "Check the Teleport service's status with 'systemctl status teleport.service'" + log_important "View Teleport logs with 'journalctl -u teleport.service'" + elif is_macos_host; then + log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + fi + log_important "Contact Teleport support for further assistance." + fi + log_only "" +} +# start teleport in foreground (when there's no systemd) +start_teleport_foreground() { + log "Starting Teleport in the foreground" + # shellcheck disable=SC2091 + $(get_teleport_start_command) +} +# start teleport via launchd (after installing config) +start_teleport_launchd() { + log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots." 
+ launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist + sleep ${ALIVE_CHECK_DELAY} +} +# start teleport via systemd (after installing unit) +start_teleport_systemd() { + log "Starting Teleport via systemd. It will automatically be started whenever the system reboots." + systemctl enable teleport.service + systemctl start teleport.service + sleep ${ALIVE_CHECK_DELAY} +} +# checks whether teleport binaries exist on the host +teleport_binaries_exist() { + for BINARY_NAME in teleport tctl tsh; do + if [ -f ${TELEPORT_BINARY_DIR}/${BINARY_NAME} ]; then return 0; else return 1; fi + done +} +# checks whether a teleport config exists on the host +teleport_config_exists() { if [ -f ${TELEPORT_CONFIG_PATH} ]; then return 0; else return 1; fi; } +# checks whether a teleport data dir exists on the host +teleport_datadir_exists() { if [ -d ${TELEPORT_DATA_DIR} ]; then return 0; else return 1; fi; } + +# error out if any required values are not set +check_set TELEPORT_VERSION +check_set TARGET_HOSTNAME +check_set TARGET_PORT +check_set JOIN_TOKEN +check_set CA_PIN_HASHES +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + check_set APP_NAME + check_set APP_URI + check_set APP_PUBLIC_ADDR +fi + +### +# main script starts here +### +# check connectivity to teleport server/port +if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then + log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check" +else + log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})" + if ! 
check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then + # if we don't have a connectivity test method assigned, we know we couldn't run the test + if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then + log "Couldn't find nc, telnet or /dev/tcp to do a connection test" + log "Going to blindly continue without testing connectivity" + else + log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}" + log_important "This issue will need to be fixed before the script can continue." + log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script." + exit 1 + fi + else + log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good" + fi +fi + +# use OSTYPE variable to figure out host type/arch +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + # linux host, now detect arch + TELEPORT_BINARY_TYPE="linux" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "armv7l" ]]; then + TELEPORT_ARCH="arm" + elif [[ ${ARCH} == "aarch64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + elif [[ ${ARCH} == "i686" ]]; then + TELEPORT_ARCH="386" + else + log_important "Error: cannot detect architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}" + # if the download format is already set, we have no need to detect distro + if [[ ${TELEPORT_FORMAT} == "" ]]; then + # detect distro + # if /etc/os-release doesn't exist, we need to use some other logic + if [ ! 
-f /etc/os-release ]; then + if [ -f /etc/centos-release ]; then + if grep -q 'CentOS release 6' /etc/centos-release; then + log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]" + log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low." + exit 1 + fi + elif [ -f /etc/redhat-release ]; then + if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then + log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low." + exit 1 + elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then + log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low." + exit 1 + fi + fi + # use ID_LIKE value from /etc/os-release (if set) + # this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc + else + check_exists_fatal cut + DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true + if [[ ${DISTRO_TYPE} == "" ]]; then + # use exact ID value from /etc/os-release if ID_LIKE is not set + DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2) + fi + if [[ ${DISTRO_TYPE} =~ "debian" ]]; then + TELEPORT_FORMAT="deb" + elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then + TELEPORT_FORMAT="rpm" + else + log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer" + TELEPORT_FORMAT="tarball" + fi + fi + log "Detected distro type: ${DISTRO_TYPE}" + #suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu + if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then + SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service" + fi + fi 
+elif [[ "${OSTYPE}" == "darwin"* ]]; then + # macos host, now detect arch + TELEPORT_BINARY_TYPE="darwin" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "arm64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + else + log_important "Error: unsupported architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}" + TELEPORT_FORMAT="tarball" +else + log_important "Error - unsupported platform: ${OSTYPE}" + exit 1 +fi +log "Using Teleport distribution: ${TELEPORT_FORMAT}" + +# create temporary directory and exit cleanup logic +TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX) +log "Created temp dir ${TEMP_DIR}" +pushd "${TEMP_DIR}" >/dev/null 2>&1 + +finish() { + popd >/dev/null 2>&1 + rm -rf "${TEMP_DIR}" +} +trap finish EXIT + +# optional format override (mostly for testing) +if [[ ${OVERRIDE_FORMAT} != "" ]]; then + TELEPORT_FORMAT="${OVERRIDE_FORMAT}" + log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}" +fi + +# check whether teleport is running already +# if it is, we exit gracefully with an error +if is_running_teleport; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + TELEPORT_PID=$(get_teleport_pid) + log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})" + log_cleanup_message + exit 1 + else + log "Ignoring is_running_teleport as requested" + fi +fi + +# check for existing config file +if teleport_config_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}." 
+ log_cleanup_message + exit 1 + else + log "Ignoring teleport_config_exists as requested" + fi +fi + +# check for existing data directory +if teleport_datadir_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_datadir_exists as requested" + fi +fi + +# check for existing binaries +if teleport_binaries_exist; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_binaries_exist as requested" + fi +fi + +install_from_file() { + # select correct URL/installation method based on distro + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz" + + # check that needed tools are installed + check_exists_fatal curl tar + # download tarball + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # extract tarball + tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}" + # install binaries to /usr/local/bin + for BINARY in ${TELEPORT_BINARY_LIST}; do + ${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/" + done + elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then + # convert teleport arch to deb arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + DEB_ARCH="amd64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + DEB_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + DEB_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + DEB_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb" + check_deb_not_already_installed + # check that 
needed tools are installed + check_exists_fatal curl dpkg + # download deb and register cleanup operation + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install deb + log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}" + dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then + # convert teleport arch to rpm arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + RPM_ARCH="x86_64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + RPM_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + RPM_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + RPM_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm" + check_rpm_not_already_installed + # check for package managers + if check_exists dnf; then + log "Found 'dnf' package manager, using it" + PACKAGE_MANAGER_COMMAND="dnf -y install" + elif check_exists yum; then + log "Found 'yum' package manager, using it" + PACKAGE_MANAGER_COMMAND="yum -y localinstall" + else + PACKAGE_MANAGER_COMMAND="" + log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead" + fi + # check that needed tools are installed + check_exists_fatal curl + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install with package manager if available + if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then + log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}" + # install rpm with package manager + ${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # use rpm if we couldn't find a package manager + else + # install RPM (in upgrade mode) + log "Using rpm to install 
${TEMP_DIR}/${DOWNLOAD_FILENAME}" + rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + fi + else + log_important "Can't figure out what Teleport format to use" + exit 1 + fi +} + +install_from_repo() { + if [[ "${REPO_CHANNEL}" == "" ]]; then + # By default, use the current version's channel. + REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}" + fi + + # Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + PACKAGE_LIST=$(package_list) + if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then + # old versions of ubuntu require that keys get added by `apt-key add`, without + # adding the key apt shows a key signing error when installing teleport. + if [[ + ($ID == "ubuntu" && $VERSION_ID == "16.04") || \ + ($ID == "debian" && $VERSION_ID == "9" ) + ]]; then + apt install apt-transport-https gnupg -y + curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add - + echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + else + curl -fsSL https://apt.releases.teleport.dev/gpg \ + -o /usr/share/keyrings/teleport-archive-keyring.asc + echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \ + https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + fi + apt-get update + apt-get install -y ${PACKAGE_LIST} + elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then + if [ "$ID" = "rhel" ]; then + VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version + fi + yum install -y yum-utils + yum-config-manager --add-repo \ + "$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")" + + # Remove metadata cache to prevent cache from other channel (eg, prior version) + # See: https://github.com/gravitational/teleport/issues/22581 + 
yum --disablerepo="*" --enablerepo="teleport" clean metadata + + yum install -y ${PACKAGE_LIST} + else + echo "Unsupported distro: $ID" + exit 1 + fi +} + +# package_list returns the list of packages to install. +# The list of packages can be fed into yum or apt because they already have the expected format when pinning versions. +package_list() { + TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME} + TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater" + + if [[ "${TELEPORT_FORMAT}" == "deb" ]]; then + TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}" + + elif [[ "${TELEPORT_FORMAT}" == "rpm" ]]; then + TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}" + TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + fi + + PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION} + # (warning): This expression is constant. Did you forget the $ on a variable? + # Disabling the warning above because expression is templated. + # shellcheck disable=SC2050 + if is_using_systemd && [[ "false" == "true" ]]; then + # Teleport Updater requires systemd. + PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}" + fi + echo ${PACKAGE_LIST} +} + +is_repo_available() { + if [[ "${OSTYPE}" != "linux-gnu" ]]; then + return 1 + fi + + # Populate $ID, $VERSION_ID and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + # The following distros+version have a Teleport repository to install from. + case "${ID}-${VERSION_ID}" in + ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \ + debian-9* | debian-10* | debian-11* | \ + rhel-7* | rhel-8* | rhel-9* | \ + centos-7* | centos-8* | centos-9* | \ + amzn-2 | amzn-2023) + return 0;; + esac + + return 1 +} + +if is_repo_available; then + log "Installing repo for distro $ID." + install_from_repo +else + log "Installing from binary file." 
+ install_from_file +fi + +# check that teleport binary can be found and runs +if ! check_teleport_binary; then + log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected." + log_important "This usually means that there was an error during installation." + log_important "Check this log for obvious signs of error and contact Teleport support" + log_important "for further assistance." + exit 1 +fi + +# install teleport config +# check the mode and write the appropriate config type +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + install_teleport_app_config +elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then + install_teleport_database_config +else + install_teleport_node_config +fi + + +# Used to track whether a Teleport agent was installed using this method. +export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true" + +# install systemd unit if applicable (linux hosts) +if is_using_systemd; then + log "Host is using systemd" + # we only need to manually install the systemd config if teleport was installed via tarball + # all other packages will deploy it automatically + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + install_systemd_unit + fi + start_teleport_systemd + print_welcome_message +# install launchd config on MacOS hosts +elif is_macos_host; then + log "Host is running MacOS" + install_launchd_config + start_teleport_launchd + print_welcome_message +# not a MacOS host and no systemd available, print a warning +# and temporarily start Teleport in the foreground +else + log "Host does not appear to be using systemd" + no_systemd_warning + start_teleport_foreground +fi + diff --git a/ansible/01_old/roles/teleport/templates/teleport.yaml.j2 b/ansible/01_old/roles/teleport/templates/teleport.yaml.j2 new file mode 100644 index 0000000..180af28 --- /dev/null +++ b/ansible/01_old/roles/teleport/templates/teleport.yaml.j2 @@ -0,0 +1,35 @@ +version: v3 +teleport: + nodename: {{ ansible_hostname }} + data_dir: /var/lib/teleport + 
join_params: + token_name: {{ get_join_token }} + method: token + proxy_server: {{ teleport_uri }}:443 + log: + output: stderr + severity: INFO + format: + output: text + ca_pin: {{ get_ca_pin }} + diag_addr: "" +auth_service: + enabled: "no" +ssh_service: + enabled: "yes" + labels: + ipaddr: {{ansible_default_ipv4.address}} + group: {{ group_names[-1] }} + os: {{ ansible_distribution }} +{% if custom_labels %} + {{ custom_labels }} +{% endif %} + commands: + - name: hostname + command: [hostname] + period: 1m0s +proxy_service: + enabled: "no" + https_keypairs: [] + https_keypairs_reload_interval: 0s + acme: {} diff --git a/ansible/01_old/roles/teleport/vars/main.yml b/ansible/01_old/roles/teleport/vars/main.yml new file mode 100644 index 0000000..d06c156 --- /dev/null +++ b/ansible/01_old/roles/teleport/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for teleport diff --git a/ansible/01_old/roles/test/defaults/main.yml b/ansible/01_old/roles/test/defaults/main.yml new file mode 100644 index 0000000..7c45df5 --- /dev/null +++ b/ansible/01_old/roles/test/defaults/main.yml @@ -0,0 +1,65 @@ +# helm file install +helm_checksum: sha256:950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 +helm_version: v3.10.3 + +# cmoa info +cmoa_namespace: imxc +cmoa_version: rel3.4.8 + +# default ip/version (not change) +before_ip: 111.111.111.111 +before_version: rel0.0.0 + +# files/00-default in role +docker_secret_file: secret_nexus.yaml + +# all, jaeger, jspd +imxc_ui: all + +# [docker_config_path] +docker_config_nexus: dockerconfig/docker_config_nexus.json + +# [jaeger] +jaeger_servicename: imxc-ui-service-jaeger +jaeger_service_port: 80 +jaeger_nodePort: 31080 # only imxc-ui-jaeger option (imxc-ui-jaeger template default port=31084) + +# [minio] +minio_service_name: minio +minio_service_port: 9000 +minio_nodePort: 32002 +minio_user: cloudmoa +minio_pass: admin1234 +bucket_name: cortex-bucket +days: 42 +rule_id: cloudmoa + +# [Elasticsearch] 
+elasticsearch_service_name: elasticsearch +elasticsearch_service_port: 9200 +elasticsearch_nodePort: 30200 + +# [Keycloak] +# Keycloak configuration settings +keycloak_http_port: 31082 +keycloak_https_port: 8443 +keycloak_management_http_port: 31990 +keycloak_realm: exem + +# Keycloak administration console user +keycloak_admin_user: admin +keycloak_admin_password: admin +keycloak_auth_realm: master +keycloak_auth_client: admin-cli +keycloak_context: /auth +keycloak_login_theme: CloudMOA_V2 + +# keycloak_clients +keycloak_clients: + - name: 'authorization_server' + client_id: authorization_server + realm: exem + redirect_uris: "http://{{ ansible_default_ipv4.address }}:31080/*,http://{{ ansible_default_ipv4.address }}:31084/*,http://localhost:8080/*,http://localhost:8081/*" + public_client: True + + diff --git a/ansible/01_old/roles/test/files/00-default/sa_patch.sh b/ansible/01_old/roles/test/files/00-default/sa_patch.sh new file mode 100755 index 0000000..618a35b --- /dev/null +++ b/ansible/01_old/roles/test/files/00-default/sa_patch.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export KUBECONFIG=$1 + +kubectl wait node --for=condition=ready --all --timeout=60s + +#kubectl -n imxc patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' +kubectl -n default patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' diff --git a/ansible/01_old/roles/test/files/00-default/secret_dockerhub.yaml b/ansible/01_old/roles/test/files/00-default/secret_dockerhub.yaml new file mode 100644 index 0000000..268027b --- /dev/null +++ b/ansible/01_old/roles/test/files/00-default/secret_dockerhub.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: regcred +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CiAgICAgICJhdXRoIjogIlpYaGxiV1JsZGpJNk0yWXlObVV6T0RjdFlqY3paQzAwTkRVMUxUazNaRFV0T1dWaU9EWmtObVl4WXpOayIKICAgIH0KICB9Cn0KCg== +type: kubernetes.io/dockerconfigjson diff --git 
a/ansible/01_old/roles/test/files/00-default/secret_nexus.yaml b/ansible/01_old/roles/test/files/00-default/secret_nexus.yaml new file mode 100644 index 0000000..6a2543f --- /dev/null +++ b/ansible/01_old/roles/test/files/00-default/secret_nexus.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICIxMC4xMC4zMS4yNDM6NTAwMCI6IHsKICAgICAgImF1dGgiOiAiWTI5eVpUcGpiM0psWVdSdGFXNHhNak0wIgogICAgfQogIH0KfQoK +kind: Secret +metadata: + name: regcred +type: kubernetes.io/dockerconfigjson + diff --git a/ansible/01_old/roles/test/files/01-storage/00-storageclass.yaml b/ansible/01_old/roles/test/files/01-storage/00-storageclass.yaml new file mode 100644 index 0000000..8f41292 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/00-storageclass.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exem-local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/ansible/01_old/roles/test/files/01-storage/01-persistentvolume.yaml b/ansible/01_old/roles/test/files/01-storage/01-persistentvolume.yaml new file mode 100644 index 0000000..1bd4546 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/01-persistentvolume.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-0 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-1 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv2 + nodeAffinity: + required: 
+ nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-2 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv3 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-3 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv4 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 diff --git a/ansible/01_old/roles/test/files/01-storage/cmoa_minio b/ansible/01_old/roles/test/files/01-storage/cmoa_minio new file mode 100755 index 0000000..522b87d --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/cmoa_minio @@ -0,0 +1,63 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, time, urllib3 +from minio import Minio +from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition +from minio.commonconfig import ENABLED, Filter + +def minio_conn(ipaddr, portnum, ac_key, sec_key): + conn='{}:{}'.format(ipaddr,portnum) + url='http://{}'.format(conn) + print(url) + minio_client = Minio( + conn, access_key=ac_key, secret_key=sec_key, secure=False, + http_client=urllib3.ProxyManager( + url, timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + retries=urllib3.Retry( + total=5, backoff_factor=0.2, + status_forcelist=[ + 500, 502, 503, 504 + ], + ), + ), + ) + + return minio_client + +def minio_create_buckets(minio_client, bucket_name, days, rule_id="cloudmoa"): + config = LifecycleConfig( + [ + Rule( + ENABLED, + rule_filter=Filter(prefix=""), + rule_id=rule_id, + expiration=Expiration(days=days), + ), + ], + ) + minio_client.set_bucket_lifecycle(bucket_name, config) + +def minio_delete_bucket(client, bucket_name): + client.delete_bucket_lifecycle(bucket_name) + +def main(): + s3_url = os.sys.argv[1].split(':')[0] + s3_url_port = os.sys.argv[1].split(':')[1] + minio_user = os.sys.argv[2] + minio_pass = os.sys.argv[3] + bucket_name = os.sys.argv[4] + minio_days = os.sys.argv[5] + rule_id = os.sys.argv[6] + + print(s3_url, s3_url_port, minio_user, minio_pass) + + minio_client=minio_conn(s3_url, s3_url_port, minio_user, minio_pass) + minio_create_buckets(minio_client, bucket_name, minio_days, rule_id) + +if __name__ == "__main__": + try: + main() + except Exception as err: + print("[Usage] minio {url:port} {username} {password} {bucketName} {days} {ruleId}") + print(err) \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/01-storage/minio/.helmignore b/ansible/01_old/roles/test/files/01-storage/minio/.helmignore new file mode 100644 index 0000000..a9fe727 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns 
to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/01-storage/minio/Chart.yaml b/ansible/01_old/roles/test/files/01-storage/minio/Chart.yaml new file mode 100644 index 0000000..fc21076 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +description: Multi-Cloud Object Storage +name: minio +version: 4.0.2 +appVersion: RELEASE.2022-05-08T23-50-31Z +keywords: + - minio + - storage + - object-storage + - s3 + - cluster +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +sources: +- https://github.com/minio/minio +maintainers: +- name: MinIO, Inc + email: dev@minio.io diff --git a/ansible/01_old/roles/test/files/01-storage/minio/README.md b/ansible/01_old/roles/test/files/01-storage/minio/README.md new file mode 100644 index 0000000..ad3eb7d --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/README.md @@ -0,0 +1,235 @@ +# MinIO Helm Chart + +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) + +MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. 
+ +For more detailed documentation please visit [here](https://docs.minio.io/) + +## Introduction + +This chart bootstraps MinIO Cluster on [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Helm cli with Kubernetes cluster configured. +- PV provisioner support in the underlying infrastructure. (We recommend using ) +- Use Kubernetes version v1.19 and later for best experience. + +## Configure MinIO Helm repo + +```bash +helm repo add minio https://charts.min.io/ +``` + +### Installing the Chart + +Install this chart using: + +```bash +helm install --namespace minio --set rootUser=rootuser,rootPassword=rootpass123 --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Upgrading the Chart + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +### Configuration + +Refer the [Values file](./values.yaml) for all the possible config fields. + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. 
For example, + +```bash +helm install --name my-release -f values.yaml minio/minio +``` + +### Persistence + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +### Existing PersistentVolumeClaim + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +### NetworkPolicy + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for *all* pods in the namespace: + +``` +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. 
+ +### Existing secret + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. + +First, create the secret: + +```bash +kubectl create secret generic my-minio-secret --from-literal=rootUser=foobarbaz --from-literal=rootPassword=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: + +```bash +helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data.\ in Secret | Corresponding variable | Description | Required | +|:------------------------|:-----------------------|:---------------|:---------| +| `rootUser` | `rootUser` | Root user. | yes | +| `rootPassword` | `rootPassword` | Root password. | yes | + +All corresponding variables will be ignored in values file. + +### Configure TLS + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). Then create a secret using + +```bash +kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. 
If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include MinIO's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for MinIO's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +### Create buckets after install + +Install the chart, specifying the buckets you want to create after install: + +```bash +helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + +33# Create policies after install +Install the chart, specifying the policies you want to create after install: + +```bash +helm install --set policies[0].name=mypolicy,policies[0].statements[0].resources[0]='arn:aws:s3:::bucket1',policies[0].statements[0].actions[0]='s3:ListBucket',policies[0].statements[0].actions[1]='s3:GetObject' minio/minio +``` + +Description of the configuration parameters used above - + +- `policies[].name` - name of the policy to create, must be a string with length > 0 +- `policies[].statements[]` - list of statements, includes actions and resources +- 
`policies[].statements[].resources[]` - list of resources that applies the statement +- `policies[].statements[].actions[]` - list of actions granted + +### Create user after install + +Install the chart, specifying the users you want to create after install: + +```bash +helm install --set users[0].accessKey=accessKey,users[0].secretKey=secretKey,users[0].policy=none,users[1].accessKey=accessKey2,users[1].secretRef=existingSecret,users[1].secretKey=password,users[1].policy=none minio/minio +``` + +Description of the configuration parameters used above - + +- `users[].accessKey` - accessKey of user +- `users[].secretKey` - secretKey of usersecretRef +- `users[].existingSecret` - secret name that contains the secretKey of user +- `users[].existingSecretKey` - data key in existingSecret secret containing the secretKey +- `users[].policy` - name of the policy to assign to user + +## Uninstalling the Chart + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +helm delete my-release +``` + +or + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/NOTES.txt b/ansible/01_old/roles/test/files/01-storage/minio/templates/NOTES.txt new file mode 100644 index 0000000..9337196 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/NOTES.txt @@ -0,0 +1,43 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access MinIO from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. 
kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . }}-local + +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +MinIO can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . 
}} + +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_bucket.txt b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..35a48fc --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,109 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? 
; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.buckets }} +{{ $global := . 
}} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ tpl .name $global }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_policy.txt b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_policy.txt new file mode 100644 index 0000000..d565b16 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_policy.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkPolicyExists ($policy) +# Check if the policy exists, by using the exit code of `mc admin policy info` +checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? 
+} + +# createPolicy($name, $filename) +createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.policies }} +# Create the policies +{{- range $idx, $policy := .Values.policies }} +createPolicy {{ $policy.name }} policy_{{ $idx }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_user.txt b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_user.txt new file mode 100644 index 0000000..7771428 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_create_user.txt @@ -0,0 +1,88 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? 
; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkUserExists ($username) +# Check if the user exists, by using the exit code of `mc admin user info` +checkUserExists() { + USER=$1 + CMD=$(${MC} admin user info myminio $USER > /dev/null 2>&1) + return $? +} + +# createUser ($username, $password, $policy) +createUser() { + USER=$1 + PASS=$2 + POLICY=$3 + + # Create the user if it does not exist + if ! checkUserExists $USER ; then + echo "Creating user '$USER'" + ${MC} admin user add myminio $USER $PASS + else + echo "User '$USER' already exists." + fi + + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.users }} +{{ $global := . }} +# Create the users +{{- range .Values.users }} +{{- if .existingSecret }} +createUser {{ tpl .accessKey $global }} $(cat /config/secrets/{{ tpl .accessKey $global }}) {{ .policy }} +{{ else }} +createUser {{ tpl .accessKey $global }} {{ .secretKey }} {{ .policy }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_custom_command.txt b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_custom_command.txt new file mode 100644 index 0000000..b583a77 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_custom_command.txt @@ -0,0 +1,58 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. 
+ +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# runCommand ($@) +# Run custom mc command +runCommand() { + ${MC} "$@" + return $? 
+} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.customCommands }} +# Run custom commands +{{- range .Values.customCommands }} +runCommand {{ .command }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_policy.tpl b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_policy.tpl new file mode 100644 index 0000000..83a2e15 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helper_policy.tpl @@ -0,0 +1,18 @@ +{{- $statements_length := len .statements -}} +{{- $statements_length := sub $statements_length 1 -}} +{ + "Version": "2012-10-17", + "Statement": [ +{{- range $i, $statement := .statements }} + { + "Effect": "Allow", + "Action": [ +"{{ $statement.actions | join "\",\n\"" }}" + ]{{ if $statement.resources }}, + "Resource": [ +"{{ $statement.resources | join "\",\n\"" }}" + ]{{ end }} + }{{ if lt $i $statements_length }},{{end }} +{{- end }} + ] +} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/_helpers.tpl b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helpers.tpl new file mode 100644 index 0000000..4e38194 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/_helpers.tpl @@ -0,0 +1,218 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare ">=1.7-0, <1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for console ingress. +*/}} +{{- define "minio.consoleIngress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to MinIO binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "minio.getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "minio.root.username" -}} + {{- if .Values.rootUser }} + {{- .Values.rootUser | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 20 "Key" "rootUser") }} + {{- end }} +{{- end -}} + +{{- define "minio.root.password" -}} + {{- if .Values.rootPassword }} + {{- .Values.rootPassword | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 40 "Key" "rootPassword") }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/configmap.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/configmap.yaml new file mode 100644 index 0000000..95a7c60 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + add-user: |- +{{ include (print $.Template.BasePath "/_helper_create_user.txt") . | indent 4 }} + add-policy: |- +{{ include (print $.Template.BasePath "/_helper_create_policy.txt") . | indent 4 }} +{{- range $idx, $policy := .Values.policies }} + # {{ $policy.name }} + policy_{{ $idx }}.json: |- +{{ include (print $.Template.BasePath "/_helper_policy.tpl") . 
| indent 4 }} +{{ end }} + custom-command: |- +{{ include (print $.Template.BasePath "/_helper_custom_command.txt") . | indent 4 }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/console-ingress.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/console-ingress.yaml new file mode 100644 index 0000000..2ce9a93 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/console-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.consoleIngress.enabled -}} +{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}} +{{- $servicePort := .Values.consoleService.port -}} +{{- $ingressPath := .Values.consoleIngress.path -}} +apiVersion: {{ template "minio.consoleIngress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.consoleIngress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.consoleIngress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.consoleIngress.ingressClassName }} + ingressClassName: {{ .Values.consoleIngress.ingressClassName }} +{{- end }} +{{- if .Values.consoleIngress.tls }} + tls: + {{- range .Values.consoleIngress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.consoleIngress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/console-service.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/console-service.yaml new file mode 100644 index 0000000..f4b1294 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/console-service.yaml @@ -0,0 +1,48 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-console + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.consoleService.annotations }} + annotations: +{{ toYaml .Values.consoleService.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} + type: ClusterIP + {{- if not (empty .Values.consoleService.clusterIP) }} + clusterIP: {{ .Values.consoleService.clusterIP }} + {{end}} +{{- else if eq .Values.consoleService.type "LoadBalancer" }} + type: {{ .Values.consoleService.type }} + loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} +{{- else }} + type: {{ .Values.consoleService.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.consoleService.port }} + protocol: TCP +{{- if (and (eq .Values.consoleService.type "NodePort") ( .Values.consoleService.nodePort)) }} + nodePort: {{ .Values.consoleService.nodePort }} +{{- else }} + targetPort: {{ .Values.consoleService.port }} +{{- end}} +{{- if .Values.consoleService.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.consoleService.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/deployment.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/deployment.yaml new file mode 100644 index 0000000..a06bc35 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/deployment.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: 1 + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }}" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/gateway-deployment.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/gateway-deployment.yaml new file mode 100644 index 0000000..b14f86b --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/gateway-deployment.yaml @@ -0,0 +1,173 @@ +{{- if eq .Values.mode "gateway" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + {{- if eq .Values.gateway.type "nas" }} + - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }} " + {{- end }} + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client.crt" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client.key" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/ingress.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/ingress.yaml new file mode 100644 index 0000000..8d9a837 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/networkpolicy.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..68a2599 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + - port: {{ .Values.consoleService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..8037eb7 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: minio + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" . 
}} +{{- end }} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-bucket-job.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..434b31d --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeBucketJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-policy-job.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-policy-job.yaml new file mode 100644 index 0000000..ae78769 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-policy-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.policies }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-policies-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-policies-job + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makePolicyJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.podAnnotations }} + annotations: +{{ toYaml .Values.makePolicyJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makePolicyJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makePolicyJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-policy"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makePolicyJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-user-job.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-user-job.yaml new file mode 100644 index 0000000..d3750e8 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-create-user-job.yaml @@ -0,0 +1,97 @@ +{{- $global := . -}} +{{- if .Values.users }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-user-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-user-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeUserJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeUserJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeUserJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeUserJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- range .Values.users }} + {{- if .existingSecret }} + - secret: + name: {{ tpl .existingSecret $global }} + items: + - key: {{ .existingSecretKey }} + path: secrets/{{ tpl .accessKey $global }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeUserJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-custom-command.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-custom-command.yaml new file mode 100644 index 0000000..7e83faf --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/post-install-custom-command.yaml @@ -0,0 +1,87 @@ +{{- if .Values.customCommands }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-custom-command-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-custom-command-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.customCommandJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.podAnnotations }} + annotations: +{{ toYaml .Values.customCommandJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.customCommandJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.customCommandJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/custom-command"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.customCommandJob.resources | indent 10 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/pvc.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/pvc.yaml new file mode 100644 index 0000000..369aade --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/pvc.yaml @@ -0,0 +1,35 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/secrets.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/secrets.yaml new file mode 100644 index 0000000..da2ecab --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/secrets.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + rootUser: {{ include "minio.root.username" . | b64enc | quote }} + rootPassword: {{ include "minio.root.password" . 
| b64enc | quote }} + {{- if .Values.etcd.clientCert }} + etcd_client.crt: {{ .Values.etcd.clientCert | toString | b64enc | quote }} + {{- end }} + {{- if .Values.etcd.clientCertKey }} + etcd_client.key: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/securitycontextconstraints.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 0000000..4bac7e3 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/service.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/service.yaml new file mode 100644 index 0000000..64aa990 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/service.yaml @@ -0,0 +1,49 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + monitoring: "true" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/serviceaccount.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..6a4bd94 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/servicemonitor.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..809848f --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{ else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- if .Values.tls.enabled }} + - port: https + scheme: https + {{ else }} + - port: http + scheme: http + {{- end }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + {{- if not .Values.metrics.serviceMonitor.public }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-prometheus + key: token + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} + monitoring: "true" +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/templates/statefulset.yaml b/ansible/01_old/roles/test/files/01-storage/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4160f0 --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/templates/statefulset.yaml @@ -0,0 +1,217 @@ +{{- if eq .Values.mode "distributed" }} +{{ $poolCount := .Values.pools | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $poolCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/01-storage/minio/values.yaml b/ansible/01_old/roles/test/files/01-storage/minio/values.yaml new file mode 100644 index 0000000..a957f7f --- /dev/null +++ b/ansible/01_old/roles/test/files/01-storage/minio/values.yaml @@ -0,0 +1,461 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: 10.10.31.243:5000/cmoa3/minio + tag: RELEASE.2022-05-08T23-50-31Z + pullPolicy: IfNotPresent + +imagePullSecrets: + - name: "regcred" +# - name: "image-pull-secret" + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). 
+## +mcImage: + repository: 10.10.31.243:5000/cmoa3/mc + tag: RELEASE.2022-05-09T04-08-26Z + pullPolicy: IfNotPresent + +## minio mode, i.e. standalone or distributed or gateway. +mode: distributed ## other supported values are "standalone", "gateway" + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Typically the deployment/statefulset includes checksums of secrets/config, +## So that when these change on a subsequent helm install, the deployment/statefulset +## is restarted. This can result in unnecessary restarts under GitOps tooling such as +## flux, so set to "true" to disable this behaviour. +ignoreChartChecksums: false + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Port number for MinIO S3 API Access +minioAPIPort: "9000" + +## Port number for MinIO Browser COnsole Access +minioConsolePort: "9001" + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default rootUser, rootPassword +## AccessKey and secretKey is generated when not set +## Distributed MinIO ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +rootUser: "admin" +rootPassword: "passW0rd" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | rootUser | rootUser | +## | rootPassword | rootPassword | +## +## All mentioned variables will be ignored in values file. +## .data.rootUser and .data.rootPassword are mandatory, +## others depend on enabled status of corresponding sections. 
+existingSecret: "" + +## Directory on the MinIO pof +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" + +## Path where PV would be mounted on the MinIO Pod +mountPath: "/export" +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +## +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 2 +# Number of MinIO containers running +#replicas: 16 +replicas: 2 +# Number of expanded MinIO clusters +pools: 1 + +# Deploy if 'mode == gateway' - 4 replicas. +gateway: + type: "nas" # currently only "nas" are supported. + replicas: 4 + +## TLS Settings for MinIO +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for MinIO. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. 
+trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: {} + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "exem-local-storage" + VolumeName: "" + accessMode: ReadWriteOnce + size: 50Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +#service: +# type: NodePort +# clusterIP: ~ + ## Make sure to match it to minioAPIPort +# port: "9000" +# nodePort: "32002" + +service: + type: ClusterIP + clusterIP: ~ + ## Make sure to match it to minioAPIPort + port: "9000" + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +ingress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +consoleService: + type: NodePort + clusterIP: ~ + ## Make sure to match it to minioConsolePort + port: "9001" + nodePort: "32001" + +consoleIngress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - console.minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if 
enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" + +# Additational pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + #memory: 16Gi + memory: 1Gi + cpu: 200m + +## List of policies to be created after minio install +## +## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] +## you can define additional policies with custom supported actions and resources +policies: [] +## writeexamplepolicy policy grants creation or deletion of buckets with name +## starting with example. In addition, grants objects write permissions on buckets starting with +## example. +# - name: writeexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:AbortMultipartUpload" +# - "s3:GetObject" +# - "s3:DeleteObject" +# - "s3:PutObject" +# - "s3:ListMultipartUploadParts" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:CreateBucket" +# - "s3:DeleteBucket" +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## readonlyexamplepolicy policy grants access to buckets with name starting with example. +## In addition, grants objects read permissions on buckets starting with example. 
+# - name: readonlyexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:GetObject" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## Additional Annotations for the Kubernetes Job makePolicyJob +makePolicyJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of users to be created after minio install +## +users: + ## Username, password and policy to be assigned to the user + ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] + ## Add new policies as explained here https://docs.min.io/docs/minio-multi-user-quickstart-guide.html + ## NOTE: this will fail if LDAP is enabled in your MinIO deployment + ## make sure to disable this if you are using LDAP. 
+ - accessKey: cloudmoa + secretKey: admin1234 + policy: consoleAdmin + # Or you can refer to specific secret + #- accessKey: externalSecret + # existingSecret: my-secret + # existingSecretKey: password + # policy: readonly + + +## Additional Annotations for the Kubernetes Job makeUserJob +makeUserJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of buckets to be created after minio install +## +buckets: + - name: cortex-bucket + policy: none + purge: false + versioning: false + + # # Name of the bucket + # - name: bucket1 + # # Policy to be set on the + # # bucket [none|download|upload|public] + # policy: none + # # Purge if bucket exists already + # purge: false + # # set versioning for + # # bucket [true|false] + # versioning: false + # - name: bucket2 + # policy: none + # purge: false + # versioning: true + +## Additional Annotations for the Kubernetes Job makeBucketJob +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of command to run after minio install +## NOTE: the mc command TARGET is always "myminio" +customCommands: + # - command: "admin policy set myminio consoleAdmin group='cn=ops,cn=groups,dc=example,dc=com'" + +## Additional Annotations for the Kubernetes Job customCommandJob +customCommandJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) +## when Chart is deployed +environment: + ## Please refer for comprehensive list https://docs.min.io/minio/baremetal/reference/minio-server/minio-server.html + ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" + ## MINIO_BROWSER: "off" + +## The name of a secret in the same kubernetes namespace which contain secret values +## This can be useful for LDAP password, etc +## The key in the secret must be 'config.env' +## +# extraSecret: minio-extraenv + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. + name: "minio-sa" + +metrics: + serviceMonitor: + enabled: false + public: true + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" diff --git a/ansible/01_old/roles/test/files/02-base/00-kafka-broker-config.yaml b/ansible/01_old/roles/test/files/02-base/00-kafka-broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/00-kafka-broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? -ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. 
cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# 
Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + 
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the 
two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/test/files/02-base/01-coredns.yaml b/ansible/01_old/roles/test/files/02-base/01-coredns.yaml new file mode 100644 index 0000000..c1cb74b --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/01-coredns.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-dns + kubernetes.io/name: coredns + name: coredns + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + 
- name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: ClusterIP + diff --git a/ansible/01_old/roles/test/files/02-base/base/.helmignore b/ansible/01_old/roles/test/files/02-base/base/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/Chart.yaml new file mode 100644 index 0000000..74d1d30 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: base +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/analysis/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/analysis/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/Chart.yaml new file mode 100644 index 0000000..74b9505 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: analysis +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml new file mode 100644 index 0000000..21a9298 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml @@ -0,0 +1,87 @@ +#docker run -d --hostname my-rabbit --name some-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management + +--- +kind: Service +apiVersion: v1 +metadata: + name: metric-analyzer-master + namespace: imxc +spec: +# clusterIP: None # We need a headless service to allow the pods to discover each + ports: # other during autodiscover phase for cluster creation. + - name: http # A ClusterIP will prevent resolving dns requests for other pods + protocol: TCP # under the same service. 
+ port: 15672 + targetPort: 15672 +# nodePort: 30001 + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 +# nodePort: 30002 + selector: + app: metric-analyzer-master +# type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-master + name: metric-analyzer-master + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: metric-analyzer-master + template: + metadata: + labels: + app: metric-analyzer-master + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer:{{ .Values.global.METRIC_ANALYZER_MASTER_VERSION }} + imagePullPolicy: IfNotPresent + name: master +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: POSTGRES_SERVER + value: postgres + - name: POSTGRES_USER + value: admin + - name: POSTGRES_PW + value: eorbahrhkswp + - name: POSTGRES_DB + value: postgresdb + - name: PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: POSTGRES_PORT + value: "5432" + - name: ES_SERVER + value: elasticsearch + - name: ES_PORT + value: "9200" + - name: ES_ID + value: "elastic" + - name: ES_PWD + value: "elastic" + - name: LOG_LEVEL + value: INFO + - name: AI_TYPE + value: BASELINE + - name: BASELINE_SIZE + value: "3" + - name: CHECK_DAY + value: "2" + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml new file mode 100644 index 0000000..7e6eaea --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: 
metric-analyzer-worker + name: metric-analyzer-worker + namespace: imxc +spec: + replicas: 10 + selector: + matchLabels: + app: metric-analyzer-worker + template: + metadata: + labels: + app: metric-analyzer-worker + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer_worker:{{ .Values.global.METRIC_ANALYZER_WORKER_VERSION }} + imagePullPolicy: IfNotPresent + name: worker +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" +# volumes: +# - hostPath: +# path: /usr/share/zoneinfo/Asia/Seoul +# name: timezone-config + resources: + requests: + memory: "100Mi" diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/analysis/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/values.yaml new file mode 100644 index 0000000..d764210 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/analysis/values.yaml @@ -0,0 +1,68 @@ +# Default values for analysis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/.helmignore new file mode 100644 index 0000000..db3418b --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/.helmignore @@ -0,0 +1,29 @@ +# Git +.git/ +.gitignore +.github/ + +# IDE +.project +.idea/ +*.tmproj + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Cortex ignore +docs/ +tools/ +ct.yaml +ci/ +README.md.gotmpl +.prettierignore +CHANGELOG.md +MAINTAINERS.md +LICENSE +Makefile +renovate.json + diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.lock b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.lock new file mode 100644 index 0000000..f909218 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.lock @@ -0,0 +1,24 @@ +dependencies: +- 
name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +digest: sha256:a6b7c1239f9cabc85dd647798a6f92ae8a9486756ab1e87fc11af2180ab03ee4 +generated: "2021-12-25T19:21:57.666697218Z" diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.yaml new file mode 100644 index 0000000..9122fe6 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/Chart.yaml @@ -0,0 +1,56 @@ +apiVersion: v2 +appVersion: v1.11.0 +dependencies: +- alias: memcached + condition: memcached.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-read + condition: memcached-index-read.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-write + condition: memcached-index-write.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-frontend + condition: memcached-frontend.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-blocks-index + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 
5.15.12 +- alias: memcached-blocks-metadata + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +description: Horizontally scalable, highly available, multi-tenant, long term Prometheus. +home: https://cortexmetrics.io/ +icon: https://avatars2.githubusercontent.com/u/43045022?s=200&v=4 +kubeVersion: ^1.19.0-0 +maintainers: +- email: thayward@infoblox.com + name: Tom Hayward + url: https://github.com/kd7lxl +- email: Niclas.Schad@plusserver.com + name: Niclas Schad + url: https://github.com/ShuzZzle +name: cortex +sources: +- https://github.com/cortexproject/cortex-helm-chart +version: 1.2.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/README.md b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/README.md new file mode 100644 index 0000000..9a793d3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/README.md @@ -0,0 +1,754 @@ + + +# cortex + +![Version: 1.2.0](https://img.shields.io/badge/Version-1.2.0-informational?style=flat-square) ![AppVersion: v1.11.0](https://img.shields.io/badge/AppVersion-v1.11.0-informational?style=flat-square) + +Horizontally scalable, highly available, multi-tenant, long term Prometheus. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Tom Hayward | thayward@infoblox.com | https://github.com/kd7lxl | +| Niclas Schad | Niclas.Schad@plusserver.com | https://github.com/ShuzZzle | + +## Documentation + +Checkout our documentation for the cortex-helm-chart [here](https://cortexproject.github.io/cortex-helm-chart/) + +## Dependencies + +### Key-Value store + +Cortex requires a Key-Value (KV) store to store the ring. It can use traditional KV stores like [Consul](https://www.consul.io/) or [etcd](https://etcd.io/), but it can also build its own KV store on top of memberlist library using a gossip algorithm. 
+ +The recommended approach is to use the built-in memberlist as a KV store, where supported. + +External KV stores can be installed alongside Cortex using their respective helm charts https://github.com/bitnami/charts/tree/master/bitnami/etcd and https://github.com/helm/charts/tree/master/stable/consul. + +### Storage + +Cortex requires a storage backend to store metrics and indexes. +See [cortex documentation](https://cortexmetrics.io/docs/) for details on supported storage types and their configuration. + +## Installation + +[Helm](https://helm.sh) must be installed to use the charts. +Please refer to Helm's [documentation](https://helm.sh/docs/) to get started. + +Once Helm is set up properly, add the repo as follows: + +```bash + helm repo add cortex-helm https://cortexproject.github.io/cortex-helm-chart +``` + +Cortex can now be installed with the following command: + +```bash + helm install cortex --namespace cortex cortex-helm/cortex +``` + +If you have custom options or values you want to override: + +```bash + helm install cortex --namespace cortex -f my-cortex-values.yaml cortex-helm/cortex +``` + +Specific versions of the chart can be installed using the `--version` option, with the default being the latest release. +What versions are available for installation can be listed with the following command: + +```bash + helm search repo cortex-helm +``` + +As part of this chart many different pods and services are installed which all +have varying resource requirements. Please make sure that you have sufficient +resources (CPU/memory) available in your cluster before installing Cortex Helm +chart. + +## Upgrades + +To upgrade Cortex use the following command: + +```bash + helm upgrade cortex -f my-cortex-values.yaml cortex-helm/cortex +``` +Note that it might be necessary to use `--reset-values` since some default values in the values.yaml might have changed or were removed. 
+ +Source code can be found [here](https://cortexmetrics.io/) + +## Requirements + +Kubernetes: `^1.19.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | memcached(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-read(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-write(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-frontend(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-index(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-metadata(memcached) | 5.15.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| alertmanager.​affinity | object | `{}` | | +| alertmanager.​annotations | object | `{}` | | +| alertmanager.​containerSecurityContext.​enabled | bool | `true` | | +| alertmanager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| alertmanager.​enabled | bool | `true` | | +| alertmanager.​env | list | `[]` | Extra env variables to pass to the cortex container | +| alertmanager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log level (debug, info, warn, error) | +| alertmanager.​extraContainers | list | `[]` | Additional containers to be added to the cortex pod. | +| alertmanager.​extraPorts | list | `[]` | Additional ports to the cortex services. Useful to expose extra container ports. | +| alertmanager.​extraVolumeMounts | list | `[]` | Extra volume mounts that will be added to the cortex container | +| alertmanager.​extraVolumes | list | `[]` | Additional volumes to the cortex pod. | +| alertmanager.​initContainers | list | `[]` | Init containers to be added to the cortex pod. 
| +| alertmanager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​nodeSelector | object | `{}` | | +| alertmanager.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Alertmanager data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| alertmanager.​persistentVolume.​annotations | object | `{}` | Alertmanager data Persistent Volume Claim annotations | +| alertmanager.​persistentVolume.​enabled | bool | `true` | If true and alertmanager.statefulSet.enabled is true, Alertmanager will create/use a Persistent Volume Claim If false, use emptyDir | +| alertmanager.​persistentVolume.​size | string | `"2Gi"` | Alertmanager data Persistent Volume size | +| alertmanager.​persistentVolume.​storageClass | string | `nil` | Alertmanager data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| alertmanager.​persistentVolume.​subPath | string | `""` | Subdirectory of Alertmanager data Persistent Volume to mount Useful if the volume's root directory is not empty | +| alertmanager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| alertmanager.​podDisruptionBudget | object | `{"maxUnavailable":1}` | If not set then a PodDisruptionBudget will not be created | +| alertmanager.​podLabels | object | `{}` | Pod Labels | +| alertmanager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​replicas | int | `1` | | +| alertmanager.​resources | object | `{}` | | +| alertmanager.​securityContext | object | `{}` | | +| alertmanager.​service.​annotations | object | `{}` | | +| alertmanager.​service.​labels | object | `{}` | | +| alertmanager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| alertmanager.​serviceMonitor.​additionalLabels | object | `{}` | | +| alertmanager.​serviceMonitor.​enabled | bool | `false` | | +| alertmanager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| alertmanager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| alertmanager.​serviceMonitor.​relabelings | list | `[]` | | +| alertmanager.​sidecar | object | `{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/data","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_alertmanager","labelValue":null,"resources":{},"searchNamespace":null,"skipTlsVerify":false,"watchMethod":null}` | Sidecars 
that collect the configmaps with specified label and stores the included files them into the respective folders | +| alertmanager.​sidecar.​skipTlsVerify | bool | `false` | skipTlsVerify Set to true to skip tls verification for kube api calls | +| alertmanager.​startupProbe.​failureThreshold | int | `10` | | +| alertmanager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. This is useful for using a persistent volume for storing silences between restarts. | +| alertmanager.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| alertmanager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| alertmanager.​strategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​terminationGracePeriodSeconds | int | `60` | | +| alertmanager.​tolerations | list | `[]` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| clusterDomain | string | `"cluster.local"` | Kubernetes cluster DNS domain | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"compactor"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| 
compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| compactor.​annotations | object | `{}` | | +| compactor.​containerSecurityContext.​enabled | bool | `true` | | +| compactor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| compactor.​enabled | bool | `true` | | +| compactor.​env | list | `[]` | | +| compactor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| compactor.​extraContainers | list | `[]` | | +| compactor.​extraPorts | list | `[]` | | +| compactor.​extraVolumeMounts | list | `[]` | | +| compactor.​extraVolumes | list | `[]` | | +| compactor.​initContainers | list | `[]` | | +| compactor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​nodeSelector | object | `{}` | | +| compactor.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | compactor data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| compactor.​persistentVolume.​annotations | object | `{}` | compactor data Persistent Volume Claim annotations | +| compactor.​persistentVolume.​enabled | bool | `true` | If true compactor will create/use a Persistent Volume Claim If false, use emptyDir | +| compactor.​persistentVolume.​size | string | `"2Gi"` | | +| compactor.​persistentVolume.​storageClass | string | `nil` | compactor data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| compactor.​persistentVolume.​subPath | string | `""` | Subdirectory of compactor data Persistent Volume to mount Useful if the volume's root directory is not empty | +| compactor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| compactor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| compactor.​podLabels | object | `{}` | Pod Labels | +| compactor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​replicas | int | `1` | | +| compactor.​resources | object | `{}` | | +| compactor.​securityContext | object | `{}` | | +| compactor.​service.​annotations | object | `{}` | | +| compactor.​service.​labels | object | `{}` | | +| compactor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| compactor.​serviceMonitor.​additionalLabels | object | `{}` | | +| compactor.​serviceMonitor.​enabled | bool | `false` | | +| compactor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| compactor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| compactor.​serviceMonitor.​relabelings | list | `[]` | | +| compactor.​startupProbe.​failureThreshold | int | `60` | | +| compactor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​startupProbe.​initialDelaySeconds | int | `120` | | +| compactor.​startupProbe.​periodSeconds | int | `30` | | +| compactor.​strategy.​type | string | `"RollingUpdate"` | | +| compactor.​terminationGracePeriodSeconds | int | `240` | | +| compactor.​tolerations | list | `[]` | | +| 
config.​alertmanager.​enable_api | bool | `false` | Enable the experimental alertmanager config api. | +| config.​alertmanager.​external_url | string | `"/api/prom/alertmanager"` | | +| config.​alertmanager.​storage | object | `{}` | Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config | +| config.​api.​prometheus_http_prefix | string | `"/prometheus"` | | +| config.​api.​response_compression_enabled | bool | `true` | Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression. | +| config.​auth_enabled | bool | `false` | | +| config.​blocks_storage.​bucket_store.​bucket_index.​enabled | bool | `true` | | +| config.​blocks_storage.​bucket_store.​sync_dir | string | `"/data/tsdb-sync"` | | +| config.​blocks_storage.​tsdb.​dir | string | `"/data/tsdb"` | | +| config.​distributor.​pool.​health_check_ingesters | bool | `true` | | +| config.​distributor.​shard_by_all_labels | bool | `true` | Distribute samples based on all labels, as opposed to solely by user and metric name. | +| config.​frontend.​log_queries_longer_than | string | `"10s"` | | +| config.​ingester.​lifecycler.​final_sleep | string | `"30s"` | Duration to sleep for before exiting, to ensure metrics are scraped. | +| config.​ingester.​lifecycler.​join_after | string | `"10s"` | We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. It can take a while to have the full picture when using gossip | +| config.​ingester.​lifecycler.​num_tokens | int | `512` | | +| config.​ingester.​lifecycler.​observe_period | string | `"10s"` | To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, after putting their own tokens into it. 
This is only useful when using gossip, since multiple ingesters joining at the same time can have conflicting tokens if they don't see each other yet. | +| config.​ingester.​lifecycler.​ring.​kvstore.​store | string | `"memberlist"` | | +| config.​ingester.​lifecycler.​ring.​replication_factor | int | `3` | Ingester replication factor per default is 3 | +| config.​ingester_client.​grpc_client_config.​max_recv_msg_size | int | `10485760` | | +| config.​ingester_client.​grpc_client_config.​max_send_msg_size | int | `10485760` | | +| config.​limits.​enforce_metric_name | bool | `true` | Enforce that every sample has a metric name | +| config.​limits.​max_query_lookback | string | `"0s"` | | +| config.​limits.​reject_old_samples | bool | `true` | | +| config.​limits.​reject_old_samples_max_age | string | `"168h"` | | +| config.​memberlist.​bind_port | int | `7946` | | +| config.​memberlist.​join_members | list | `["{{ include \"cortex.fullname\" $ }}-memberlist"]` | the service name of the memberlist if using memberlist discovery | +| config.​querier.​active_query_tracker_dir | string | `"/data/active-query-tracker"` | | +| config.​querier.​query_ingesters_within | string | `"13h"` | Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. Ingesters by default have no data older than 12 hours, so we can safely set this 13 hours | +| config.​querier.​query_store_after | string | `"12h"` | The time after which a metric should be queried from storage and not just ingesters. | +| config.​querier.​store_gateway_addresses | string | automatic | Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should is set automatically when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring). 
| +| config.​query_range.​align_queries_with_step | bool | `true` | | +| config.​query_range.​cache_results | bool | `true` | | +| config.​query_range.​results_cache.​cache.​memcached.​expiration | string | `"1h"` | | +| config.​query_range.​results_cache.​cache.​memcached_client.​timeout | string | `"1s"` | | +| config.​query_range.​split_queries_by_interval | string | `"24h"` | | +| config.​ruler.​enable_alertmanager_discovery | bool | `false` | | +| config.​ruler.​enable_api | bool | `true` | Enable the experimental ruler config api. | +| config.​ruler.​storage | object | `{}` | Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config | +| config.​runtime_config.​file | string | `"/etc/cortex-runtime-config/runtime_config.yaml"` | | +| config.​server.​grpc_listen_port | int | `9095` | | +| config.​server.​grpc_server_max_concurrent_streams | int | `10000` | | +| config.​server.​grpc_server_max_recv_msg_size | int | `10485760` | | +| config.​server.​grpc_server_max_send_msg_size | int | `10485760` | | +| config.​server.​http_listen_port | int | `8080` | | +| config.​storage | object | `{"engine":"blocks","index_queries_cache_config":{"memcached":{"expiration":"1h"},"memcached_client":{"timeout":"1s"}}}` | See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config | +| config.​storage.​index_queries_cache_config.​memcached.​expiration | string | `"1h"` | How long keys stay in the memcache | +| config.​storage.​index_queries_cache_config.​memcached_client.​timeout | string | `"1s"` | Maximum time to wait before giving up on memcached requests. 
| +| config.​store_gateway | object | `{"sharding_enabled":false}` | https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config | +| configs.​affinity | object | `{}` | | +| configs.​annotations | object | `{}` | | +| configs.​containerSecurityContext.​enabled | bool | `true` | | +| configs.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| configs.​enabled | bool | `false` | | +| configs.​env | list | `[]` | | +| configs.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| configs.​extraContainers | list | `[]` | | +| configs.​extraPorts | list | `[]` | | +| configs.​extraVolumeMounts | list | `[]` | | +| configs.​extraVolumes | list | `[]` | | +| configs.​initContainers | list | `[]` | | +| configs.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​nodeSelector | object | `{}` | | +| configs.​persistentVolume.​subPath | string | `nil` | | +| configs.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| configs.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| configs.​podLabels | object | `{}` | Pod Labels | +| configs.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​replicas | int | `1` | | +| configs.​resources | object | `{}` | | +| configs.​securityContext | object | `{}` | | +| configs.​service.​annotations | object | `{}` | | +| configs.​service.​labels | object | `{}` | | +| configs.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| configs.​serviceMonitor.​additionalLabels | object | `{}` | | +| configs.​serviceMonitor.​enabled | bool | `false` | | +| configs.​serviceMonitor.​extraEndpointSpec | object | `{}` | 
Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| configs.​serviceMonitor.​metricRelabelings | list | `[]` | | +| configs.​serviceMonitor.​relabelings | list | `[]` | | +| configs.​startupProbe.​failureThreshold | int | `10` | | +| configs.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| configs.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| configs.​strategy.​type | string | `"RollingUpdate"` | | +| configs.​terminationGracePeriodSeconds | int | `180` | | +| configs.​tolerations | list | `[]` | | +| configsdb_postgresql.​auth.​existing_secret.​key | string | `nil` | | +| configsdb_postgresql.​auth.​existing_secret.​name | string | `nil` | | +| configsdb_postgresql.​auth.​password | string | `nil` | | +| configsdb_postgresql.​enabled | bool | `false` | | +| configsdb_postgresql.​uri | string | `nil` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"distributor"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| distributor.​annotations | object | `{}` | | +| distributor.​autoscaling.​behavior 
| object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| distributor.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the distributor pods. | +| distributor.​autoscaling.​maxReplicas | int | `30` | | +| distributor.​autoscaling.​minReplicas | int | `2` | | +| distributor.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| distributor.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| distributor.​containerSecurityContext.​enabled | bool | `true` | | +| distributor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| distributor.​env | list | `[]` | | +| distributor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| distributor.​extraContainers | list | `[]` | | +| distributor.​extraPorts | list | `[]` | | +| distributor.​extraVolumeMounts | list | `[]` | | +| distributor.​extraVolumes | list | `[]` | | +| distributor.​initContainers | list | `[]` | | +| distributor.​lifecycle | object | `{}` | | +| distributor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​nodeSelector | object | `{}` | | +| distributor.​persistentVolume.​subPath | string | `nil` | | +| distributor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| distributor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| distributor.​podLabels | object | `{}` | Pod Labels | +| distributor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​replicas | int | `2` | | +| distributor.​resources | object | `{}` | | +| distributor.​securityContext | object | `{}` | | +| distributor.​service.​annotations | object | `{}` | | +| 
distributor.​service.​labels | object | `{}` | | +| distributor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| distributor.​serviceMonitor.​additionalLabels | object | `{}` | | +| distributor.​serviceMonitor.​enabled | bool | `false` | | +| distributor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| distributor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| distributor.​serviceMonitor.​relabelings | list | `[]` | | +| distributor.​startupProbe.​failureThreshold | int | `10` | | +| distributor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| distributor.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| distributor.​strategy.​type | string | `"RollingUpdate"` | | +| distributor.​terminationGracePeriodSeconds | int | `60` | | +| distributor.​tolerations | list | `[]` | | +| externalConfigSecretName | string | `"secret-with-config.yaml"` | | +| externalConfigVersion | string | `"0"` | | +| image.​pullPolicy | string | `"IfNotPresent"` | | +| image.​pullSecrets | list | `[]` | Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| image.​repository | string | `"quay.io/cortexproject/cortex"` | | +| image.​tag | string | `""` | Allows you to override the cortex version in this chart. Use at your own risk. 
| +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"ingester"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| ingester.​annotations | object | `{}` | | +| ingester.​autoscaling.​behavior.​scaleDown.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details | +| ingester.​autoscaling.​behavior.​scaleDown.​stabilizationWindowSeconds | int | `3600` | uses metrics from the past 1h to make scaleDown decisions | +| ingester.​autoscaling.​behavior.​scaleUp.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | This default scaleup policy allows adding 1 pod every 30 minutes. 
Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| ingester.​autoscaling.​enabled | bool | `false` | | +| ingester.​autoscaling.​maxReplicas | int | `30` | | +| ingester.​autoscaling.​minReplicas | int | `3` | | +| ingester.​autoscaling.​targetMemoryUtilizationPercentage | int | `80` | | +| ingester.​containerSecurityContext.​enabled | bool | `true` | | +| ingester.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ingester.​env | list | `[]` | | +| ingester.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| ingester.​extraContainers | list | `[]` | | +| ingester.​extraPorts | list | `[]` | | +| ingester.​extraVolumeMounts | list | `[]` | | +| ingester.​extraVolumes | list | `[]` | | +| ingester.​initContainers | list | `[]` | | +| ingester.​lifecycle.​preStop | object | `{"httpGet":{"path":"/ingester/shutdown","port":"http-metrics"}}` | The /shutdown preStop hook is recommended as part of the ingester scaledown process, but can be removed to optimize rolling restarts in instances that will never be scaled down or when using chunks storage with WAL disabled. https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down | +| ingester.​livenessProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. 
Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​nodeSelector | object | `{}` | | +| ingester.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Ingester data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| ingester.​persistentVolume.​annotations | object | `{}` | Ingester data Persistent Volume Claim annotations | +| ingester.​persistentVolume.​enabled | bool | `true` | If true and ingester.statefulSet.enabled is true, Ingester will create/use a Persistent Volume Claim If false, use emptyDir | +| ingester.​persistentVolume.​size | string | `"2Gi"` | Ingester data Persistent Volume size | +| ingester.​persistentVolume.​storageClass | string | `nil` | Ingester data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| ingester.​persistentVolume.​subPath | string | `""` | Subdirectory of Ingester data Persistent Volume to mount Useful if the volume's root directory is not empty | +| ingester.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ingester.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ingester.​podLabels | object | `{}` | Pod Labels | +| ingester.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ingester.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ingester.​replicas | int | `3` | | +| ingester.​resources | object | `{}` | | +| ingester.​securityContext | object | `{}` | | +| ingester.​service.​annotations | object | `{}` | | +| ingester.​service.​labels | object | `{}` | | +| ingester.​serviceAccount.​name | string | `nil` | | +| ingester.​serviceMonitor.​additionalLabels | object | `{}` | | +| ingester.​serviceMonitor.​enabled | bool | `false` | | +| ingester.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ingester.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ingester.​serviceMonitor.​relabelings | list | `[]` | | +| ingester.​startupProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. 
This is useful when using WAL | +| ingester.​statefulSet.​podManagementPolicy | string | `"OrderedReady"` | ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details | +| ingester.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| ingester.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ingester.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ingester.​strategy.​type | string | `"RollingUpdate"` | | +| ingester.​terminationGracePeriodSeconds | int | `240` | | +| ingester.​tolerations | list | `[]` | | +| ingress.​annotations | object | `{}` | | +| ingress.​enabled | bool | `false` | | +| ingress.​hosts[0].​host | string | `"chart-example.local"` | | +| ingress.​hosts[0].​paths[0] | string | `"/"` | | +| ingress.​ingressClass.​enabled | bool | `false` | | +| ingress.​ingressClass.​name | string | `"nginx"` | | +| ingress.​tls | list | `[]` | | +| memcached | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | chunk caching for legacy chunk storage engine | +| memcached-blocks-index.​architecture | string | `"high-availability"` | | +| memcached-blocks-index.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-index.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-index.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is 
the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached-blocks-index.​metrics.​enabled | bool | `true` | | +| memcached-blocks-index.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-index.​replicaCount | int | `2` | | +| memcached-blocks-index.​resources | object | `{}` | | +| memcached-blocks-metadata.​architecture | string | `"high-availability"` | | +| memcached-blocks-metadata.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-metadata.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-metadata.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks-metadata.​metrics.​enabled | bool | `true` | | +| memcached-blocks-metadata.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-metadata.​replicaCount | int | `2` | | +| memcached-blocks-metadata.​resources | object | `{}` | | +| memcached-blocks.​architecture | string | `"high-availability"` | | +| memcached-blocks.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks.​metrics.​enabled | bool | `true` | | +| memcached-blocks.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks.​replicaCount | int | `2` | | +| memcached-blocks.​resources | object | `{}` | | +| memcached-frontend.​architecture | string | `"high-availability"` | | +| memcached-frontend.​enabled | bool | `false` | | +| memcached-frontend.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-frontend.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-frontend.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-frontend.​metrics.​enabled | bool | `true` | | +| memcached-frontend.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-frontend.​replicaCount | int | `2` | | +| memcached-frontend.​resources | object | `{}` | | +| memcached-index-read | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index read caching for legacy chunk storage engine | +| memcached-index-read.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-read.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-read.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-index-write | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index write caching for legacy chunk storage engine | +| memcached-index-write.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-write.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-write.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| nginx.​affinity | object | `{}` | | +| nginx.​annotations | object | `{}` | | +| nginx.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| nginx.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the nginx pods. | +| nginx.​autoscaling.​maxReplicas | int | `30` | | +| nginx.​autoscaling.​minReplicas | int | `2` | | +| nginx.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| nginx.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| nginx.​config.​auth_orgs | list | `[]` | (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config | +| nginx.​config.​basicAuthSecretName | string | `""` | (optional) Name of basic auth secret. In order to use this option, a secret with htpasswd formatted contents at the key ".htpasswd" must exist. For example: apiVersion: v1 kind: Secret metadata: name: my-secret namespace: stringData: .htpasswd: | user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ Please note that the use of basic auth will not identify organizations the way X-Scope-OrgID does. Thus, the use of basic auth alone will not prevent one tenant from viewing the metrics of another. To ensure tenants are scoped appropriately, explicitly set the `X-Scope-OrgID` header in the nginx config. 
Example setHeaders: X-Scope-OrgID: $remote_user | +| nginx.​config.​client_max_body_size | string | `"1M"` | ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size | +| nginx.​config.​dnsResolver | string | `"coredns.kube-system.svc.cluster.local"` | | +| nginx.​config.​httpSnippet | string | `""` | arbitrary snippet to inject in the http { } section of the nginx config | +| nginx.​config.​mainSnippet | string | `""` | arbitrary snippet to inject in the top section of the nginx config | +| nginx.​config.​serverSnippet | string | `""` | arbitrary snippet to inject in the server { } section of the nginx config | +| nginx.​config.​setHeaders | object | `{}` | | +| nginx.​containerSecurityContext.​enabled | bool | `true` | | +| nginx.​containerSecurityContext.​readOnlyRootFilesystem | bool | `false` | | +| nginx.​enabled | bool | `true` | | +| nginx.​env | list | `[]` | | +| nginx.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| nginx.​extraContainers | list | `[]` | | +| nginx.​extraPorts | list | `[]` | | +| nginx.​extraVolumeMounts | list | `[]` | | +| nginx.​extraVolumes | list | `[]` | | +| nginx.​http_listen_port | int | `80` | | +| nginx.​image.​pullPolicy | string | `"IfNotPresent"` | | +| nginx.​image.​repository | string | `"nginx"` | | +| nginx.​image.​tag | float | `1.21` | | +| nginx.​initContainers | list | `[]` | | +| nginx.​livenessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​nodeSelector | object | `{}` | | +| nginx.​persistentVolume.​subPath | string | `nil` | | +| nginx.​podAnnotations | object | `{}` | Pod Annotations | +| nginx.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| nginx.​podLabels | object | `{}` | Pod Labels | +| nginx.​readinessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| 
nginx.​replicas | int | `2` | | +| nginx.​resources | object | `{}` | | +| nginx.​securityContext | object | `{}` | | +| nginx.​service.​annotations | object | `{}` | | +| nginx.​service.​labels | object | `{}` | | +| nginx.​service.​type | string | `"ClusterIP"` | | +| nginx.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| nginx.​startupProbe.​failureThreshold | int | `10` | | +| nginx.​startupProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| nginx.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| nginx.​strategy.​type | string | `"RollingUpdate"` | | +| nginx.​terminationGracePeriodSeconds | int | `10` | | +| nginx.​tolerations | list | `[]` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"querier"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| querier.​annotations | object | `{}` | | +| querier.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| querier.​autoscaling.​enabled | bool | `false` | Creates 
a HorizontalPodAutoscaler for the querier pods. | +| querier.​autoscaling.​maxReplicas | int | `30` | | +| querier.​autoscaling.​minReplicas | int | `2` | | +| querier.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| querier.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| querier.​containerSecurityContext.​enabled | bool | `true` | | +| querier.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| querier.​env | list | `[]` | | +| querier.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| querier.​extraContainers | list | `[]` | | +| querier.​extraPorts | list | `[]` | | +| querier.​extraVolumeMounts | list | `[]` | | +| querier.​extraVolumes | list | `[]` | | +| querier.​initContainers | list | `[]` | | +| querier.​lifecycle | object | `{}` | | +| querier.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​nodeSelector | object | `{}` | | +| querier.​persistentVolume.​subPath | string | `nil` | | +| querier.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| querier.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| querier.​podLabels | object | `{}` | Pod Labels | +| querier.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​replicas | int | `2` | | +| querier.​resources | object | `{}` | | +| querier.​securityContext | object | `{}` | | +| querier.​service.​annotations | object | `{}` | | +| querier.​service.​labels | object | `{}` | | +| querier.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| querier.​serviceMonitor.​additionalLabels | object | `{}` | | +| querier.​serviceMonitor.​enabled | bool | `false` | | +| 
querier.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| querier.​serviceMonitor.​metricRelabelings | list | `[]` | | +| querier.​serviceMonitor.​relabelings | list | `[]` | | +| querier.​startupProbe.​failureThreshold | int | `10` | | +| querier.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| querier.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| querier.​strategy.​type | string | `"RollingUpdate"` | | +| querier.​terminationGracePeriodSeconds | int | `180` | | +| querier.​tolerations | list | `[]` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"query-frontend"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| query_frontend.​annotations | object | `{}` | | +| query_frontend.​containerSecurityContext.​enabled | bool | `true` | | +| query_frontend.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| query_frontend.​env | list | `[]` | | +| query_frontend.​extraArgs | object | `{}` | Additional Cortex 
container arguments, e.g. log.level (debug, info, warn, error) | +| query_frontend.​extraContainers | list | `[]` | | +| query_frontend.​extraPorts | list | `[]` | | +| query_frontend.​extraVolumeMounts | list | `[]` | | +| query_frontend.​extraVolumes | list | `[]` | | +| query_frontend.​initContainers | list | `[]` | | +| query_frontend.​lifecycle | object | `{}` | | +| query_frontend.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​nodeSelector | object | `{}` | | +| query_frontend.​persistentVolume.​subPath | string | `nil` | | +| query_frontend.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| query_frontend.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| query_frontend.​podLabels | object | `{}` | Pod Labels | +| query_frontend.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​replicas | int | `2` | | +| query_frontend.​resources | object | `{}` | | +| query_frontend.​securityContext | object | `{}` | | +| query_frontend.​service.​annotations | object | `{}` | | +| query_frontend.​service.​labels | object | `{}` | | +| query_frontend.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| query_frontend.​serviceMonitor.​additionalLabels | object | `{}` | | +| query_frontend.​serviceMonitor.​enabled | bool | `false` | | +| query_frontend.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| query_frontend.​serviceMonitor.​metricRelabelings | list | `[]` | | +| query_frontend.​serviceMonitor.​relabelings | list | `[]` | | +| 
query_frontend.​startupProbe.​failureThreshold | int | `10` | | +| query_frontend.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| query_frontend.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| query_frontend.​strategy.​type | string | `"RollingUpdate"` | | +| query_frontend.​terminationGracePeriodSeconds | int | `180` | | +| query_frontend.​tolerations | list | `[]` | | +| ruler.​affinity | object | `{}` | | +| ruler.​annotations | object | `{}` | | +| ruler.​containerSecurityContext.​enabled | bool | `true` | | +| ruler.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ruler.​directories | object | `{}` | allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html | +| ruler.​enabled | bool | `true` | | +| ruler.​env | list | `[]` | | +| ruler.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) | +| ruler.​extraContainers | list | `[]` | | +| ruler.​extraPorts | list | `[]` | | +| ruler.​extraVolumeMounts | list | `[]` | | +| ruler.​extraVolumes | list | `[]` | | +| ruler.​initContainers | list | `[]` | | +| ruler.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​nodeSelector | object | `{}` | | +| ruler.​persistentVolume.​subPath | string | `nil` | | +| ruler.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ruler.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ruler.​podLabels | object | `{}` | Pod Labels | +| ruler.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​replicas | int | `1` | | +| ruler.​resources | object | `{}` | | +| ruler.​securityContext | object | `{}` | | +| ruler.​service.​annotations | object | `{}` | | +| ruler.​service.​labels | object | `{}` | | +| ruler.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| ruler.​serviceMonitor.​additionalLabels | object | `{}` | | +| ruler.​serviceMonitor.​enabled | bool | `false` | | +| ruler.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ruler.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ruler.​serviceMonitor.​relabelings | list | `[]` | | +| ruler.​sidecar | object | 
`{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/tmp/rules","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_rules","labelValue":null,"resources":{},"searchNamespace":null,"watchMethod":null}` | Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders | +| ruler.​sidecar.​defaultFolderName | string | `nil` | The default folder name, it will create a subfolder under the `folder` and put rules in there instead | +| ruler.​sidecar.​folder | string | `"/tmp/rules"` | folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) | +| ruler.​sidecar.​folderAnnotation | string | `nil` | If specified, the sidecar will look for annotation with this name to create folder and put graph here. You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. | +| ruler.​sidecar.​label | string | `"cortex_rules"` | label that the configmaps with rules are marked with | +| ruler.​sidecar.​labelValue | string | `nil` | value of label that the configmaps with rules are set to | +| ruler.​sidecar.​searchNamespace | string | `nil` | If specified, the sidecar will search for rules config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | +| ruler.​startupProbe.​failureThreshold | int | `10` | | +| ruler.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ruler.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ruler.​strategy.​type | string | `"RollingUpdate"` | | +| ruler.​terminationGracePeriodSeconds | int | `180` | | +| ruler.​tolerations | list | `[]` | | +| runtimeconfigmap.​annotations | object | `{}` | | +| runtimeconfigmap.​create | bool | `true` | If true, a configmap for the `runtime_config` will be created. If false, the configmap _must_ exist already on the cluster or pods will fail to create. | +| runtimeconfigmap.​runtime_config | object | `{}` | https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file | +| serviceAccount.​annotations | object | `{}` | | +| serviceAccount.​automountServiceAccountToken | bool | `true` | | +| serviceAccount.​create | bool | `true` | | +| serviceAccount.​name | string | `nil` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"store-gateway"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` 
| | +| store_gateway.​annotations | object | `{}` | | +| store_gateway.​containerSecurityContext.​enabled | bool | `true` | | +| store_gateway.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| store_gateway.​env | list | `[]` | | +| store_gateway.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| store_gateway.​extraContainers | list | `[]` | | +| store_gateway.​extraPorts | list | `[]` | | +| store_gateway.​extraVolumeMounts | list | `[]` | | +| store_gateway.​extraVolumes | list | `[]` | | +| store_gateway.​initContainers | list | `[]` | | +| store_gateway.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​nodeSelector | object | `{}` | | +| store_gateway.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Store-gateway data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| store_gateway.​persistentVolume.​annotations | object | `{}` | Store-gateway data Persistent Volume Claim annotations | +| store_gateway.​persistentVolume.​enabled | bool | `true` | If true Store-gateway will create/use a Persistent Volume Claim If false, use emptyDir | +| store_gateway.​persistentVolume.​size | string | `"2Gi"` | Store-gateway data Persistent Volume size | +| store_gateway.​persistentVolume.​storageClass | string | `nil` | Store-gateway data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| store_gateway.​persistentVolume.​subPath | string | `""` | Subdirectory of Store-gateway data Persistent Volume to mount Useful if the volume's root directory is not empty | +| store_gateway.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| store_gateway.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| store_gateway.​podLabels | object | `{}` | Pod Labels | +| store_gateway.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​replicas | int | `1` | | +| store_gateway.​resources | object | `{}` | | +| store_gateway.​securityContext | object | `{}` | | +| store_gateway.​service.​annotations | object | `{}` | | +| store_gateway.​service.​labels | object | `{}` | | +| store_gateway.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| store_gateway.​serviceMonitor.​additionalLabels | object | `{}` | | +| store_gateway.​serviceMonitor.​enabled | bool | `false` | | +| store_gateway.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| store_gateway.​serviceMonitor.​metricRelabelings | list | `[]` | | +| store_gateway.​serviceMonitor.​relabelings | list | `[]` | | +| store_gateway.​startupProbe.​failureThreshold | int | `60` | | +| store_gateway.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​startupProbe.​initialDelaySeconds | int | `120` | | +| store_gateway.​startupProbe.​periodSeconds | int | `30` | | +| store_gateway.​strategy.​type | string | `"RollingUpdate"` | | +| 
store_gateway.​terminationGracePeriodSeconds | int | `240` | | +| store_gateway.​tolerations | list | `[]` | | +| table_manager.​affinity | object | `{}` | | +| table_manager.​annotations | object | `{}` | | +| table_manager.​containerSecurityContext.​enabled | bool | `true` | | +| table_manager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| table_manager.​env | list | `[]` | | +| table_manager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| table_manager.​extraContainers | list | `[]` | | +| table_manager.​extraPorts | list | `[]` | | +| table_manager.​extraVolumeMounts | list | `[]` | | +| table_manager.​extraVolumes | list | `[]` | | +| table_manager.​initContainers | list | `[]` | | +| table_manager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​nodeSelector | object | `{}` | | +| table_manager.​persistentVolume.​subPath | string | `nil` | | +| table_manager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| table_manager.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| table_manager.​podLabels | object | `{}` | Pod Labels | +| table_manager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​replicas | int | `1` | | +| table_manager.​resources | object | `{}` | | +| table_manager.​securityContext | object | `{}` | | +| table_manager.​service.​annotations | object | `{}` | | +| table_manager.​service.​labels | object | `{}` | | +| table_manager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| table_manager.​serviceMonitor.​additionalLabels | object | `{}` | | +| table_manager.​serviceMonitor.​enabled | bool 
| `false` | | +| table_manager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| table_manager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| table_manager.​serviceMonitor.​relabelings | list | `[]` | | +| table_manager.​startupProbe.​failureThreshold | int | `10` | | +| table_manager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| table_manager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| table_manager.​strategy.​type | string | `"RollingUpdate"` | | +| table_manager.​terminationGracePeriodSeconds | int | `180` | | +| table_manager.​tolerations | list | `[]` | | +| tags.​blocks-storage-memcached | bool | `false` | Set to true to enable block storage memcached caching | +| useConfigMap | bool | `false` | | +| useExternalConfig | bool | `false` | | + diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/NOTES.txt b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/NOTES.txt new file mode 100644 index 0000000..1bd3203 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{- if eq .Values.config.storage.engine "chunks" }} +Cortex chunks storage has been deprecated, and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. +No new features will be added to the chunks storage. +Unlike the official cortex default configuration this helm-chart does not run the chunk engine by default. +{{- end }} + +Verify the application is working by running these commands: + kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "cortex.querierFullname" . 
}} {{ .Values.config.server.http_listen_port }} + curl http://127.0.0.1:{{ .Values.config.server.http_listen_port }}/services diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/_helpers.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/_helpers.tpl new file mode 100644 index 0000000..81914c9 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cortex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cortex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cortex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cortex.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cortex.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the app name of cortex clients. Defaults to the same logic as "cortex.fullname", and default client expects "prometheus". 
+*/}} +{{- define "client.name" -}} +{{- if .Values.client.name -}} +{{- .Values.client.name -}} +{{- else if .Values.client.fullnameOverride -}} +{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "prometheus" .Values.client.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "cortex.labels" -}} +helm.sh/chart: {{ include "cortex.chart" . }} +{{ include "cortex.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cortex.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cortex.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create configuration parameters for memcached configuration +*/}} +{{- define "cortex.memcached" -}} +{{- if and (eq .Values.config.storage.engine "blocks") (index .Values "tags" "blocks-storage-memcached") }} +- "-blocks-storage.bucket-store.index-cache.backend=memcached" +- "-blocks-storage.bucket-store.index-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-index.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.chunks-cache.backend=memcached" +- "-blocks-storage.bucket-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.metadata-cache.backend=memcached" +- "-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-metadata.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne 
.Values.config.storage.engine "blocks") .Values.memcached.enabled }} +- "-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-read" "enabled") }} +- "-store.index-cache-read.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-read.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-write" "enabled") }} +- "-store.index-cache-write.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-write.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Create configuration for frontend memcached configuration +*/}} +{{- define "cortex.frontend-memcached" -}} +{{- if index .Values "memcached-frontend" "enabled" }} +- "-frontend.memcached.addresses=dns+{{ template "cortex.fullname" . }}-memcached-frontend.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Determine the policy api version +*/}} +{{- define "cortex.pdbVersion" -}} +{{- if or (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") (semverCompare ">=1.21" .Capabilities.KubeVersion.Version) -}} +policy/v1 +{{- else -}} +policy/v1beta1 +{{- end -}} +{{- end -}} + +{{/* +Get checksum of config secret or configMap +*/}} +{{- define "cortex.configChecksum" -}} +{{- if .Values.useExternalConfig -}} +{{- .Values.externalConfigVersion -}} +{{- else if .Values.useConfigMap -}} +{{- include (print $.Template.BasePath "/configmap.yaml") . | sha256sum -}} +{{- else -}} +{{- include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum -}} +{{- end -}} +{{- end -}} + +{{/* +Get volume of config secret of configMap +*/}} +{{- define "cortex.configVolume" -}} +- name: config + {{- if .Values.useExternalConfig }} + secret: + secretName: {{ .Values.externalConfigSecretName }} + {{- else if .Values.useConfigMap }} + configMap: + name: {{ template "cortex.fullname" . }}-config + {{- else }} + secret: + secretName: {{ template "cortex.fullname" . }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml new file mode 100644 index 0000000..49c4ca7 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alertmanager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + name: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - name: alertmanager +# image: quay.io/cortexproject/cortex:v1.9.0 +# image: registry.cloud.intermax:5000/library/cortex:v1.11.0 + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cortex:v1.11.0 + imagePullPolicy: IfNotPresent + args: + - -target=alertmanager +# - -log.level=debug + - -server.http-listen-port=80 + - -alertmanager.configs.url=http://{{ template "cortex.fullname" . 
}}-configs:8080 + - -alertmanager.web.external-url=/alertmanager + ports: + - containerPort: 80 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 0000000..989feb2 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: alertmanager +spec: + ports: + - port: 80 + selector: + name: alertmanager diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrole.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrole.yaml new file mode 100644 index 0000000..cf7f25a --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrole.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cortex.fullname" . }}-clusterrole + labels: + {{- include "cortex.labels" . | nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..c1d9884 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cortex.fullname" . 
}}-clusterrolebinding + labels: + {{- include "cortex.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cortex.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 0000000..f89b33c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,23 @@ + +{{/* +compactor fullname +*/}} +{{- define "cortex.compactorFullname" -}} +{{ include "cortex.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "cortex.compactorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "cortex.compactorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: compactor +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml new file mode 100644 index 0000000..8634e4c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.compactor.replicas) 1) (.Values.compactor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.compactor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml new file mode 100644 index 0000000..a33e849 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.compactor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- if .Values.compactor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.compactor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.compactor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.compactor.serviceMonitor.interval }} + interval: {{ .Values.compactor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.compactor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.compactor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.compactor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.compactor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 0000000..c0a1baf --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,141 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.compactor.annotations | nindent 4 }} +spec: + replicas: {{ .Values.compactor.replicas }} + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.compactor.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . 
}}-compactor + {{- if .Values.compactor.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.compactor.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.compactor.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.compactor.persistentVolume.storageClass }} + {{- if (eq "-" .Values.compactor.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.compactor.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.compactor.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.compactor.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.compactorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.compactor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.compactor.priorityClassName }} + priorityClassName: {{ .Values.compactor.priorityClassName }} + {{- end }} + {{- if .Values.compactor.securityContext.enabled }} + securityContext: {{- omit .Values.compactor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.compactor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.compactor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.compactor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.compactor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.compactor.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.compactor.extraVolumes }} + {{- toYaml .Values.compactor.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.compactor.extraContainers }} + {{ toYaml .Values.compactor.extraContainers | nindent 8 }} + {{- end }} + - name: compactor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=compactor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.compactor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.compactor.extraVolumeMounts }} + {{- toYaml .Values.compactor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.compactor.persistentVolume.subPath }} + subPath: {{ .Values.compactor.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.compactor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.compactor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.compactor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.compactor.resources | nindent 12 }} + {{- if .Values.compactor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.compactor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.compactor.env }} + env: + {{- toYaml .Values.compactor.env | nindent 12 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml new file mode 100644 index 0000000..ae20f78 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml @@ -0,0 +1,25 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.compactorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- with .Values.compactor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.compactor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.compactorSelectorLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configmap.yaml new file mode 100644 index 0000000..001b13a --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (and (not .Values.useExternalConfig) (.Values.useConfigMap)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: | + {{- tpl (toYaml .Values.config) . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl new file mode 100644 index 0000000..c8945dc --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl @@ -0,0 +1,23 @@ + +{{/* +configs fullname +*/}} +{{- define "cortex.configsFullname" -}} +{{ include "cortex.fullname" . }}-configs +{{- end }} + +{{/* +configs common labels +*/}} +{{- define "cortex.configsLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: configs +{{- end }} + +{{/* +configs selector labels +*/}} +{{- define "cortex.configsSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: configs +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml new file mode 100644 index 0000000..86048ce --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml @@ -0,0 +1,124 @@ +{{- if .Values.configs.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.configs.annotations | nindent 4 }} +spec: + replicas: {{ .Values.configs.replicas }} + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.configs.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.configsLabels" . | nindent 8 }} + {{- with .Values.configs.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.configs.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.configs.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.configs.priorityClassName }} + priorityClassName: {{ .Values.configs.priorityClassName }} + {{- end }} + {{- if .Values.configs.securityContext.enabled }} + securityContext: {{- omit .Values.configs.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.configs.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: configs + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=configs" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configsdb_postgresql.enabled }} + - "-configs.database.uri={{ .Values.configsdb_postgresql.uri }}" + - "-configs.database.password-file=/etc/postgresql/password" + - "-configs.database.migrations-dir=/migrations" + {{- else }} + - "-configs.database.uri=memory://" + {{- end }} + {{- range $key, $value := .Values.configs.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/cortex + subPath: {{ .Values.configs.persistentVolume.subPath }} + - name: runtime-config + mountPath: /etc/cortex-runtime-config + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + mountPath: /etc/postgresql + {{- end }} + {{- if .Values.configs.extraVolumeMounts }} + {{- toYaml .Values.configs.extraVolumeMounts | nindent 12}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.configs.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.configs.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.configs.readinessProbe | nindent 12 }} + 
resources: + {{- toYaml .Values.configs.resources | nindent 12 }} + {{- if .Values.configs.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.configs.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.configs.env }} + env: + {{- toYaml .Values.configs.env | nindent 12 }} + {{- end }} + {{- if .Values.configs.extraContainers }} + {{- toYaml .Values.configs.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.configs.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.configs.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.configs.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + secret: + secretName: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.name }}{{ else }}{{ template "cortex.fullname" . }}-postgresql{{ end }} + items: + - key: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.key }}{{ else }}postgresql-password{{ end }} + path: password + {{- end }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.configs.extraVolumes }} + {{- toYaml .Values.configs.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml new file mode 100644 index 0000000..b6e46b4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.configs.replicas) 1) (.Values.configs.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.configs.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml new file mode 100644 index 0000000..393bc32 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.configs.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . 
| nindent 4 }} + {{- if .Values.configs.serviceMonitor.additionalLabels }} +{{ toYaml .Values.configs.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.configs.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.configs.serviceMonitor.interval }} + interval: {{ .Values.configs.serviceMonitor.interval }} + {{- end }} + {{- if .Values.configs.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.configs.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.configs.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.configs.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.configs.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.configs.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml new file mode 100644 index 0000000..6dbc2cd --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.configs.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + {{- with .Values.configs.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.configs.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.configsSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/cortex-pv.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/cortex-pv.yaml new file mode 100644 index 0000000..472f83e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/cortex-pv.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-0 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH1 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-1 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH2 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-2 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: 
+ path: {{ .Values.global.IMXC_INGESTER_PV_PATH3 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 0000000..24e8d00 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,23 @@ + +{{/* +distributor fullname +*/}} +{{- define "cortex.distributorFullname" -}} +{{ include "cortex.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "cortex.distributorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "cortex.distributorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml new file mode 100644 index 0000000..fc9c0ba --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.distributor.annotations | nindent 4 }} +spec: + {{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.distributor.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.distributorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.distributor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.distributor.priorityClassName }} + priorityClassName: {{ .Values.distributor.priorityClassName }} + {{- end }} + {{- if .Values.distributor.securityContext.enabled }} + securityContext: {{- omit .Values.distributor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.distributor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: distributor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=distributor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.distributor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.distributor.extraVolumeMounts }} + {{- toYaml .Values.distributor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.distributor.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.distributor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.distributor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.distributor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.distributor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.distributor.env }} + env: + {{- toYaml .Values.distributor.env | nindent 12 }} + {{- end }} + {{- with .Values.distributor.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.distributor.extraContainers }} + {{- toYaml .Values.distributor.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.distributor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.distributor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.distributor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.distributor.extraVolumes }} + {{- toYaml .Values.distributor.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml new file mode 100644 index 0000000..0c1c9f6 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.distributor.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.distributorFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.distributorFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . 
}} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml new file mode 100644 index 0000000..7b05701 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.distributor.replicas) 1) (.Values.distributor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.distributor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml new file mode 100644 index 0000000..5db8389 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.distributor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + {{- if .Values.distributor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.distributor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.distributor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.distributor.serviceMonitor.interval }} + interval: {{ .Values.distributor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.distributor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.distributor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.distributor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.distributor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 0000000..1c4f7f6 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml new file mode 100644 index 0000000..2db7197 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 0000000..4705327 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,23 @@ + +{{/* +ingester fullname +*/}} +{{- define "cortex.ingesterFullname" -}} +{{ include "cortex.fullname" . 
}}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "cortex.ingesterLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "cortex.ingesterSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ingester +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml new file mode 100644 index 0000000..b26d3a3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml @@ -0,0 +1,130 @@ +{{- if not .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ingester.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.ingester.env }} + {{ toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- with .Values.ingester.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml new file mode 100644 index 0000000..97c5290 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml @@ -0,0 +1,29 @@ +{{- with .Values.ingester.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.ingesterFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ if $.Values.ingester.statefulSet.enabled }}StatefulSet{{ else }}Deployment{{ end }} + name: {{ include "cortex.ingesterFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .targetMemoryUtilizationPercentage }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml new file mode 100644 index 0000000..a47ecb4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ingester.replicas) 1) (.Values.ingester.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ingester.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml new file mode 100644 index 0000000..310ca54 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingester.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- if .Values.ingester.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ingester.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ingester.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ingester.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 0000000..8016441 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,153 @@ +{{- if .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + updateStrategy: + {{- toYaml .Values.ingester.statefulStrategy | nindent 4 }} + podManagementPolicy: "{{ .Values.ingester.statefulSet.podManagementPolicy }}" + serviceName: {{ template "cortex.fullname" . }}-ingester-headless + {{- if .Values.ingester.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.ingester.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.ingester.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.ingester.persistentVolume.storageClass }} + {{- if (eq "-" .Values.ingester.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.ingester.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.ingester.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.ingester.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.ingester.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8 }} + {{- end }} + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ingester.env }} + env: + {{- toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 0000000..b783caa --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml new file mode 100644 index 0000000..02183ae --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl new file mode 100644 index 0000000..61d8b78 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl @@ -0,0 +1,23 @@ + +{{/* +nginx fullname +*/}} +{{- define "cortex.nginxFullname" -}} +{{ include "cortex.fullname" . }}-nginx +{{- end }} + +{{/* +nginx common labels +*/}} +{{- define "cortex.nginxLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: nginx +{{- end }} + +{{/* +nginx selector labels +*/}} +{{- define "cortex.nginxSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: nginx +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml new file mode 100644 index 0000000..fd3474d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml @@ -0,0 +1,140 @@ +{{- if .Values.nginx.enabled }} +{{- $rootDomain := printf "%s.svc.%s:%d" .Release.Namespace .Values.clusterDomain (.Values.config.server.http_listen_port | int) }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +data: + nginx.conf: |- + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + {{- with .Values.nginx.config.mainSnippet }} + {{ tpl . $ | nindent 4 }} + {{- end }} + + http { + default_type application/octet-stream; + client_max_body_size {{.Values.nginx.config.client_max_body_size}}; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" $http_x_scope_orgid'; + access_log /dev/stderr main; + sendfile on; + tcp_nopush on; + resolver {{ default (printf "coredns.kube-system.svc.%s" .Values.clusterDomain ) .Values.nginx.config.dnsResolver }}; + + {{- with .Values.nginx.config.httpSnippet }} + {{ tpl . 
$ | nindent 6 }} + {{- end }} + + server { # simple reverse-proxy + listen {{ .Values.nginx.http_listen_port }}; + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + proxy_http_version 1.1; + proxy_set_header X-Scope-OrgID 0; + + {{- range $key, $value := .Values.nginx.config.setHeaders }} + proxy_set_header {{ $key }} {{ $value }}; + {{- end }} + + {{ if .Values.nginx.config.basicAuthSecretName -}} + auth_basic "Restricted Content"; + auth_basic_user_file /etc/apache2/.htpasswd; + {{- end }} + + {{- with .Values.nginx.config.serverSnippet }} + {{ tpl . $ | nindent 8 }} + {{- end }} + + location = /healthz { + # auth_basic off is not set here, even when a basic auth directive is + # included in the server block, as Nginx's NGX_HTTP_REWRITE_PHASE + # (point when this return statement is evaluated) comes before the + # NGX_HTTP_ACCESS_PHASE (point when basic auth is evaluated). Thus, + # this return statement returns a response before basic auth is + # evaluated. + return 200 'alive'; + } + + # Distributor Config + location = /ring { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /all_user_stats { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /api/prom/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + ## New Remote write API. Ref: https://cortexmetrics.io/docs/api/#remote-write + location = /api/v1/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + # Alertmanager Config + location ~ /api/prom/alertmanager/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /api/v1/alerts { + proxy_pass http://{{ template "cortex.fullname" . 
}}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /multitenant_alertmanager/status { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + # Ruler Config + location ~ /api/v1/rules { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + location ~ /ruler/ring { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + # Config Config + location ~ /api/prom/configs/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-configs.{{ $rootDomain }}$request_uri; + } + + # Query Config + location ~ /api/prom/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + ## New Query frontend APIs as per https://cortexmetrics.io/docs/api/#querier--query-frontend + location ~ ^{{.Values.config.api.prometheus_http_prefix}}/api/v1/(read|metadata|labels|series|query_range|query) { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + location ~ {{.Values.config.api.prometheus_http_prefix}}/api/v1/label/.* { + proxy_pass http://{{ template "cortex.fullname" . 
}}-query-frontend.{{ $rootDomain }}$request_uri; + } + {{- if and (.Values.config.auth_enabled) (.Values.nginx.config.auth_orgs) }} + # Auth orgs + {{- range $org := compact .Values.nginx.config.auth_orgs | uniq }} + location = /api/v1/push/{{ $org }} { + proxy_set_header X-Scope-OrgID {{ $org }}; + proxy_pass http://{{ template "cortex.fullname" $ }}-distributor.{{ $rootDomain }}/api/v1/push; + } + {{- end }} + {{- end }} + } + } +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml new file mode 100644 index 0000000..bbd3a9d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml @@ -0,0 +1,111 @@ +{{- if .Values.nginx.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.nginx.annotations | nindent 4 }} +spec: + {{- if not .Values.nginx.autoscaling.enabled }} + replicas: {{ .Values.nginx.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.nginx.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.nginxLabels" . | nindent 8 }} + {{- with .Values.nginx.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/nginx/nginx-config.yaml") . | sha256sum }} + {{- with .Values.nginx.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.nginx.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName }} + {{- end }} + {{- if .Values.nginx.securityContext.enabled }} + securityContext: {{- omit .Values.nginx.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.nginx.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + {{- if .Values.nginx.extraArgs }} + args: + {{- range $key, $value := .Values.nginx.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.nginx.extraVolumeMounts }} + {{- toYaml .Values.nginx.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + mountPath: /etc/apache2 + readOnly: true + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.nginx.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.nginx.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.nginx.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.nginx.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.nginx.resources | nindent 12 }} + {{- if .Values.nginx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.nginx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.nginx.env }} + env: + {{- toYaml .Values.nginx.env | nindent 12 }} + {{- end }} + {{- if .Values.nginx.extraContainers }} + {{ toYaml .Values.nginx.extraContainers | indent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.nginx.nodeSelector | nindent 8 }} + affinity: + {{- toYaml 
.Values.nginx.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.nginx.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.nginx.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "cortex.fullname" . }}-nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + secret: + defaultMode: 420 + secretName: {{ .Values.nginx.config.basicAuthSecretName }} + {{- end }} + {{- if .Values.nginx.extraVolumes }} + {{- toYaml .Values.nginx.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml new file mode 100644 index 0000000..b93a13d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.nginx.enabled .Values.nginx.autoscaling.enabled }} +{{- with .Values.nginx.autoscaling -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.nginxFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.nginxFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml new file mode 100644 index 0000000..51e6609 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.ingress.enabled .Values.nginx.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} +spec: +{{- if .Values.ingress.ingressClass.enabled }} + ingressClassName: {{ .Values.ingress.ingressClass.name }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + pathType: "Prefix" + backend: + service: + name: {{ include "cortex.nginxFullname" $ }} + port: + number: {{ $.Values.nginx.http_listen_port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml new file mode 100644 index 0000000..959764a --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.nginx.enabled) (gt (int .Values.nginx.replicas) 1) (.Values.nginx.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.nginx.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml new file mode 100644 index 0000000..72a2c44 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + {{- with .Values.nginx.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.nginx.service.annotations | nindent 4 }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.nginxSelectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/node-exporter.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/node-exporter.yaml new file mode 100644 index 0000000..7bb3983 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/node-exporter.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: node-exporter + name: node-exporter + name: node-exporter + namespace: imxc +spec: + clusterIP: None + ports: + - name: scrape + port: 9100 + protocol: TCP + selector: + app: node-exporter + type: ClusterIP +--- +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: node-exporter + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: node-exporter +{{- end }} + template: + metadata: + labels: + app: node-exporter + name: node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/node-exporter + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + ports: + - containerPort: 9100 + hostPort: 9100 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root 
+ readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl new file mode 100644 index 0000000..c0a6204 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl @@ -0,0 +1,23 @@ + +{{/* +querier fullname +*/}} +{{- define "cortex.querierFullname" -}} +{{ include "cortex.fullname" . }}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "cortex.querierLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "cortex.querierSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: querier +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml new file mode 100644 index 0000000..a84ba8a --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.querier.annotations | nindent 4 }} +spec: + {{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.querier.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.querierLabels" . | nindent 8 }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.querier.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.querier.priorityClassName }} + priorityClassName: {{ .Values.querier.priorityClassName }} + {{- end }} + {{- if .Values.querier.securityContext.enabled }} + securityContext: {{- omit .Values.querier.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.querier.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: querier + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=querier" + - "-config.file=/etc/cortex/cortex.yaml" + - "-querier.frontend-address={{ template "cortex.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.grpc_listen_port }}" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.querier.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.querier.extraVolumeMounts }} + {{- toYaml .Values.querier.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.querier.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.querier.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.querier.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.querier.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.querier.resources | nindent 12 }} + {{- if .Values.querier.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.querier.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.querier.env }} + {{- toYaml .Values.querier.env | nindent 12 }} + {{- end }} + {{- with .Values.querier.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.querier.extraContainers }} + {{- toYaml .Values.querier.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.querier.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.querier.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.querier.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.querier.extraVolumes }} + {{- toYaml .Values.querier.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml new file mode 100644 index 0000000..f078526 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.querier.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.querierFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.querierLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.querierFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml new file mode 100644 index 0000000..b69de62 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.querier.replicas) 1) (.Values.querier.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.querier.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml new file mode 100644 index 0000000..c84d1a4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.querier.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . 
| nindent 4 }} + {{- if .Values.querier.serviceMonitor.additionalLabels }} +{{ toYaml .Values.querier.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.querier.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.querier.serviceMonitor.interval }} + interval: {{ .Values.querier.serviceMonitor.interval }} + {{- end }} + {{- if .Values.querier.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.querier.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.querier.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.querier.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.querier.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.querier.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml new file mode 100644 index 0000000..0701b7d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- with .Values.querier.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.querier.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.querierSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 0000000..c1f74c9 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,23 @@ + +{{/* +query-frontend fullname +*/}} +{{- define "cortex.queryFrontendFullname" -}} +{{ include "cortex.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "cortex.queryFrontendLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "cortex.queryFrontendSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 0000000..3e31d18 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,107 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . 
| nindent 4 }} + annotations: + {{- toYaml .Values.query_frontend.annotations | nindent 4 }} +spec: + replicas: {{ .Values.query_frontend.replicas }} + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.query_frontend.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 8 }} + {{- with .Values.query_frontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.query_frontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.query_frontend.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.query_frontend.priorityClassName }} + priorityClassName: {{ .Values.query_frontend.priorityClassName }} + {{- end }} + {{- if .Values.query_frontend.securityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.query_frontend.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: query-frontend + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=query-frontend" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.frontend-memcached" . 
| nindent 12 }} + {{- range $key, $value := .Values.query_frontend.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.query_frontend.extraVolumeMounts }} + {{- toYaml .Values.query_frontend.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.query_frontend.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.query_frontend.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.query_frontend.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.query_frontend.resources | nindent 12 }} + {{- if .Values.query_frontend.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.env }} + env: + {{- toYaml .Values.query_frontend.env | nindent 12 }} + {{- end }} + {{- with .Values.query_frontend.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.extraContainers }} + {{- toYaml .Values.query_frontend.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.query_frontend.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.query_frontend.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.query_frontend.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.query_frontend.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.query_frontend.extraVolumes }} + {{- toYaml .Values.query_frontend.extraVolumes | nindent 8}} + {{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml new file mode 100644 index 0000000..2d76c6b --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.query_frontend.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- if .Values.query_frontend.serviceMonitor.additionalLabels }} +{{ toYaml .Values.query_frontend.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.query_frontend.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.query_frontend.serviceMonitor.interval }} + interval: {{ .Values.query_frontend.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query_frontend.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.query_frontend.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml new file mode 100644 index 0000000..939457c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 0000000..85ff2e8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml new file mode 100644 index 0000000..5256949 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.query_frontend.replicas) 1) (.Values.query_frontend.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.query_frontend.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 0000000..86270d0 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,30 @@ + +{{/* +ruler fullname +*/}} +{{- define "cortex.rulerFullname" -}} +{{ include "cortex.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "cortex.rulerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "cortex.rulerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "cortex.rulerRulesDirName" -}} +rules-{{ . 
| replace "_" "-" | trimSuffix "-" }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml new file mode 100644 index 0000000..8448108 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ruler.enabled }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml new file mode 100644 index 0000000..a8e034d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml @@ -0,0 +1,191 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ruler.annotations | nindent 4 }} +spec: + replicas: {{ .Values.ruler.replicas }} + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ruler.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.rulerLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ruler.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ruler.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ruler.priorityClassName }} + priorityClassName: {{ .Values.ruler.priorityClassName }} + {{- end }} + {{- if .Values.ruler.securityContext.enabled }} + securityContext: {{- omit .Values.ruler.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ruler.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + {{- if .Values.ruler.sidecar.enabled }} + - name: {{ template "cortex.name" . }}-sc-rules + {{- if .Values.ruler.sidecar.image.sha }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}@sha256:{{ .Values.ruler.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.ruler.sidecar.imagePullPolicy }} + env: + {{- if .Values.ruler.sidecar.watchMethod }} + - name: METHOD + value: {{ .Values.ruler.sidecar.watchMethod }} + {{ end }} + - name: LABEL + value: "{{ .Values.ruler.sidecar.label }}" + {{- if .Values.ruler.sidecar.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.ruler.sidecar.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.ruler.sidecar.folder }}{{- with .Values.ruler.sidecar.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.ruler.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.ruler.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.ruler.sidecar.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.ruler.sidecar.searchNamespace }}" + {{- end }} + {{- if .Values.ruler.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.ruler.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.ruler.sidecar.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.ruler.sidecar.folderAnnotation }}" + {{- end }} + resources: + {{- toYaml .Values.ruler.sidecar.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.sidecar.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{- end }} + - name: rules + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ruler" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configs.enabled }} + - "-ruler.configs.url=http://{{ template "cortex.configsFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}" + {{- end }} + {{- if not .Values.config.ruler.alertmanager_url }} + {{- if .Values.config.ruler.enable_alertmanager_discovery }} + - "-ruler.alertmanager-url=http://_http-metrics._tcp.{{ template "cortex.name" . }}-alertmanager-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}/api/prom/alertmanager/" + {{- else }} + - "-ruler.alertmanager-url=http://{{ template "cortex.alertmanagerFullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}/api/prom/alertmanager/" + {{- end }} + {{- end }} + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ruler.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ruler.extraVolumeMounts }} + {{- toYaml .Values.ruler.extraVolumeMounts | nindent 12}} + {{- end }} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{ end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: /data + subPath: {{ .Values.ruler.persistentVolume.subPath }} + - name: tmp + mountPath: /rules + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + mountPath: /etc/cortex/rules/{{ $dir }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.ruler.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.ruler.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.ruler.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ruler.env }} + env: + {{- toYaml .Values.ruler.env | nindent 12 }} + {{- end }} + {{- if .Values.ruler.extraContainers }} + {{- toYaml .Values.ruler.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.ruler.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ruler.affinity | nindent 8 }} + tolerations: + {{- 
toYaml .Values.ruler.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: tmp + emptyDir: {} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + configMap: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + emptyDir: {} + {{- end }} + {{- if .Values.ruler.extraVolumes }} + {{- toYaml .Values.ruler.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml new file mode 100644 index 0000000..52fb3e0 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ruler.replicas) 1) (.Values.ruler.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.ruler.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml new file mode 100644 index 0000000..de6744f --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ruler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- if .Values.ruler.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ruler.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ruler.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ruler.serviceMonitor.interval }} + interval: {{ .Values.ruler.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ruler.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ruler.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ruler.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ruler.serviceMonitor.extraEndpointSpec }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml new file mode 100644 index 0000000..7752ef4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- with .Values.ruler.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ruler.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.rulerSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml new file mode 100644 index 0000000..2b30599 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml @@ -0,0 +1,18 @@ +{{- with .Values.runtimeconfigmap }} +{{- if .create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" $ }}-runtime-config + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.labels" $ | nindent 4 }} + {{- with .annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: + runtime_config.yaml: | + {{- tpl (toYaml .runtime_config) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml new file mode 100644 index 0000000..9194971 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.configsdb_postgresql.enabled .Values.configsdb_postgresql.auth.password -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }}-postgresql + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + postgresql-password: {{ .Values.configsdb_postgresql.auth.password | b64enc}} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret.yaml new file mode 100644 index 0000000..ff0e78f --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if (and (not .Values.useExternalConfig) (not .Values.useConfigMap)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: {{ tpl (toYaml .Values.config) . 
| b64enc }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/serviceaccount.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/serviceaccount.yaml new file mode 100644 index 0000000..963f866 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl new file mode 100644 index 0000000..3cca867 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl @@ -0,0 +1,23 @@ + +{{/* +store-gateway fullname +*/}} +{{- define "cortex.storeGatewayFullname" -}} +{{ include "cortex.fullname" . }}-store-gateway +{{- end }} + +{{/* +store-gateway common labels +*/}} +{{- define "cortex.storeGatewayLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} + +{{/* +store-gateway selector labels +*/}} +{{- define "cortex.storeGatewaySelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: store-gateway +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml new file mode 100644 index 0000000..1019cc8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.store_gateway.replicas) 1) (.Values.store_gateway.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + {{- toYaml .Values.store_gateway.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml new file mode 100644 index 0000000..39eaeda --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.store_gateway.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + {{- if .Values.store_gateway.serviceMonitor.additionalLabels }} +{{ toYaml .Values.store_gateway.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.store_gateway.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.store_gateway.serviceMonitor.interval }} + interval: {{ .Values.store_gateway.serviceMonitor.interval }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.store_gateway.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.store_gateway.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 0000000..0238c75 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.store_gateway.annotations | nindent 4 }} +spec: + replicas: {{ .Values.store_gateway.replicas }} + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.store_gateway.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-store-gateway-headless + {{- if .Values.store_gateway.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.store_gateway.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.store_gateway.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.store_gateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.store_gateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.store_gateway.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{- toYaml .Values.store_gateway.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.store_gateway.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.store_gateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.store_gateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.store_gateway.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.store_gateway.priorityClassName }} + priorityClassName: {{ .Values.store_gateway.priorityClassName }} + {{- end }} + {{- if .Values.store_gateway.securityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.store_gateway.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.store_gateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.store_gateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.store_gateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.store_gateway.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.store_gateway.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.store_gateway.extraVolumes }} + {{- toYaml .Values.store_gateway.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.store_gateway.extraContainers }} + {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} + {{- end }} + - name: store-gateway + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=store-gateway" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.store_gateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.store_gateway.extraVolumeMounts }} + {{- toYaml .Values.store_gateway.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.store_gateway.persistentVolume.subPath }} + subPath: {{ .Values.store_gateway.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.store_gateway.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.store_gateway.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.store_gateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.store_gateway.resources | nindent 12 }} + {{- if .Values.store_gateway.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.store_gateway.env }} + env: + {{- toYaml .Values.store_gateway.env | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 0000000..c56ec77 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,24 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 
+kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 0000000..f58019b --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,23 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml new file mode 100644 index 0000000..fc41461 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.fullname" . }}-memberlist + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + name: gossip + targetPort: gossip + selector: + {{- include "cortex.selectorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl new file mode 100644 index 0000000..4798c6d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl @@ -0,0 +1,23 @@ + +{{/* +table-manager fullname +*/}} +{{- define "cortex.tableManagerFullname" -}} +{{ include "cortex.fullname" . }}-table-manager +{{- end }} + +{{/* +table-manager common labels +*/}} +{{- define "cortex.tableManagerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: table-manager +{{- end }} + +{{/* +table-manager selector labels +*/}} +{{- define "cortex.tableManagerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: table-manager +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml new file mode 100644 index 0000000..d24dcc3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml @@ -0,0 +1,106 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.table_manager.annotations | nindent 4 }} +spec: + replicas: {{ .Values.table_manager.replicas }} + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.table_manager.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.tableManagerLabels" . | nindent 8 }} + {{- with .Values.table_manager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.table_manager.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.table_manager.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.table_manager.priorityClassName }} + priorityClassName: {{ .Values.table_manager.priorityClassName }} + {{- end }} + {{- if .Values.table_manager.securityContext.enabled }} + securityContext: {{- omit .Values.table_manager.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.table_manager.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: table-manager + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=table-manager" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.table_manager.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.table_manager.extraVolumeMounts }} + {{- toYaml .Values.table_manager.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.table_manager.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.table_manager.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.table_manager.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.table_manager.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.table_manager.resources | nindent 12 }} + {{- if .Values.table_manager.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.table_manager.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.env }} + env: + {{- toYaml .Values.table_manager.env | nindent 12 }} + {{- end }} + {{- 
if .Values.table_manager.extraContainers }} + {{- toYaml .Values.table_manager.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.table_manager.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.table_manager.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.table_manager.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.table_manager.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.table_manager.extraVolumes }} + {{- toYaml .Values.table_manager.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml new file mode 100644 index 0000000..91adabf --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.table_manager.replicas) 1) (.Values.table_manager.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.table_manager.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml new file mode 100644 index 0000000..9748724 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.table_manager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- if .Values.table_manager.serviceMonitor.additionalLabels }} +{{ toYaml .Values.table_manager.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.table_manager.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.table_manager.serviceMonitor.interval }} + interval: {{ .Values.table_manager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.table_manager.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.table_manager.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.table_manager.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.table_manager.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml new file mode 100644 index 0000000..ff3c57d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml @@ -0,0 +1,23 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- with .Values.table_manager.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.table_manager.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/cortex/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/values.yaml new file mode 100644 index 0000000..4a0f8c8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/cortex/values.yaml @@ -0,0 +1,1605 @@ +image: + #repository: quay.io/cortexproject/cortex + repository: 10.10.31.243:5000/cmoa3/cortex + # -- Allows you to override the cortex version in this chart. Use at your own risk. + #tag: "" + tag: v1.11.0 + pullPolicy: IfNotPresent + + # -- Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: [] + pullSecrets: + - regcred + + +# -- Kubernetes cluster DNS domain +clusterDomain: cluster.local + +tags: + # -- Set to true to enable block storage memcached caching + blocks-storage-memcached: false + +ingress: + enabled: false + ingressClass: + enabled: false + name: "nginx" + annotations: {} + hosts: + - host: chart-example.local + paths: + - / + tls: [] + +serviceAccount: + create: true + name: + annotations: {} + automountServiceAccountToken: true + +useConfigMap: false +useExternalConfig: false +externalConfigSecretName: 'secret-with-config.yaml' +externalConfigVersion: '0' + +config: + auth_enabled: false + api: + prometheus_http_prefix: '/prometheus' + # -- Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs + # which can benefit from compression. + response_compression_enabled: true + ingester: + walconfig: + wal_enabled: true + flush_on_shutdown_with_wal_enabled: true + recover_from_wal: true + lifecycler: + # -- We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. 
+ # It can take a while to have the full picture when using gossip + join_after: 10s + + # -- To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, + # after putting their own tokens into it. This is only useful when using gossip, since multiple + # ingesters joining at the same time can have conflicting tokens if they don't see each other yet. + observe_period: 10s + # -- Duration to sleep for before exiting, to ensure metrics are scraped. + final_sleep: 30s + num_tokens: 512 + ring: + # -- Ingester replication factor per default is 3 + replication_factor: 3 + kvstore: + store: "memberlist" + limits: + # -- Enforce that every sample has a metric name + enforce_metric_name: true + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_lookback: 0s + server: + http_listen_port: 8080 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 10485760 + grpc_server_max_send_msg_size: 10485760 + grpc_server_max_concurrent_streams: 10000 + ingester_client: + grpc_client_config: + max_recv_msg_size: 10485760 + max_send_msg_size: 10485760 + # -- See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config + storage: + engine: blocks + index_queries_cache_config: + memcached: + # -- How long keys stay in the memcache + expiration: 1h + memcached_client: + # -- Maximum time to wait before giving up on memcached requests. + timeout: 1s + blocks_storage: + # custume backend setting related to using s3 + backend: s3 + s3: + bucket_name: cortex-bucket + # -- The S3 bucket endpoint. It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format. 
+ endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + + tsdb: + dir: /data/tsdb + bucket_store: + sync_dir: /data/tsdb-sync + bucket_index: + enabled: true + # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config + store_gateway: + sharding_enabled: false + distributor: + # -- Distribute samples based on all labels, as opposed to solely by user and + # metric name. + shard_by_all_labels: true + pool: + health_check_ingesters: true + memberlist: + bind_port: 7946 + # -- the service name of the memberlist + # if using memberlist discovery + join_members: + - '{{ include "cortex.fullname" $ }}-memberlist' + querier: + active_query_tracker_dir: /data/active-query-tracker + # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all + # queries are sent to ingester. Ingesters by default have no data older than 12 hours, + # so we can safely set this 13 hours + query_ingesters_within: 9h + # -- The time after which a metric should be queried from storage and not just + # ingesters. + query_store_after: 7h + # -- Comma separated list of store-gateway addresses in DNS Service Discovery + # format. This option should is set automatically when using the blocks storage and the + # store-gateway sharding is disabled (when enabled, the store-gateway instances + # form a ring and addresses are picked from the ring). + # @default -- automatic + store_gateway_addresses: |- + {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}} + dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095 + {{- end }} + query_range: + split_queries_by_interval: 24h + align_queries_with_step: true + cache_results: true + results_cache: + cache: + memcached: + expiration: 1h + memcached_client: + timeout: 1s + ruler: + enable_alertmanager_discovery: false + # -- Enable the experimental ruler config api. 
+ alertmanager_url: 'http://alertmanager.imxc/alertmanager' + enable_api: true + # -- Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config + storage: {} + runtime_config: + file: /etc/cortex-runtime-config/runtime_config.yaml + alertmanager: + # -- Enable the experimental alertmanager config api. + enable_api: true + external_url: 'http://alertmanager.imxc/alertmanager' + #external_url: '/api/prom/alertmanager' + # -- Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config + storage: {} + frontend: + log_queries_longer_than: 10s + # S3 사용 관련 커스텀 설정 + alertmanager_storage: + s3: + bucket_name: cortex-alertmanager + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + ruler_storage: + s3: + bucket_name: cortex-ruler + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + +runtimeconfigmap: + # -- If true, a configmap for the `runtime_config` will be created. + # If false, the configmap _must_ exist already on the cluster or pods will fail to create. + create: true + annotations: {} + # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file + # 설정부 + runtime_config: {} +alertmanager: + enabled: true + replicas: 1 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful for using a persistent volume for storing silences between restarts. 
+ enabled: false + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log level (debug, info, warn, error) + extraArgs: {} + # -experimental.alertmanager.enable-api: "true" + # -alertmanager.web.external-url: /alertmanager + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + + persistentVolume: + # -- If true and alertmanager.statefulSet.enabled is true, + # Alertmanager will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Alertmanager data Persistent Volume Claim annotations + annotations: {} + + # -- Alertmanager data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Alertmanager data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Alertmanager data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Alertmanager data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # -- If not set then a PodDisruptionBudget will not be created + podDisruptionBudget: + maxUnavailable: 1 + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 60 + + # -- Init containers to be added to the cortex pod. + initContainers: [] + + # -- Additional containers to be added to the cortex pod. + extraContainers: [] + + # -- Additional volumes to the cortex pod. + extraVolumes: [] + + # -- Extra volume mounts that will be added to the cortex container + extraVolumeMounts: [] + + # -- Additional ports to the cortex services. Useful to expose extra container ports. 
+ extraPorts: [] + + # -- Extra env variables to pass to the cortex container + env: [] + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # -- skipTlsVerify Set to true to skip tls verification for kube api calls + skipTlsVerify: false + enableUniqueFilenames: false + enabled: false + label: cortex_alertmanager + watchMethod: null + labelValue: null + folder: /data + defaultFolderName: null + searchNamespace: null + folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +distributor: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: + -validation.max-label-names-per-series: "45" + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - distributor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the distributor pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 60 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +ingester: + replicas: 3 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful when using WAL + enabled: true + # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details + podManagementPolicy: OrderedReady + + service: + annotations: {} + labels: {} + + serviceAccount: + name: + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - ingester + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 30 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details + policies: + - type: Pods + value: 1 + # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval + periodSeconds: 1800 + # -- uses metrics from the past 1h to make scaleDown decisions + stabilizationWindowSeconds: 3600 + scaleUp: + # -- This default scaleup policy allows adding 1 pod every 30 minutes. + # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + + lifecycle: + # -- The /shutdown preStop hook is recommended as part of the ingester + # scaledown process, but can be removed to optimize rolling restarts in + # instances that will never be scaled down or when using chunks storage + # with WAL disabled. 
+ # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down + preStop: + httpGet: + path: "/ingester/shutdown" + port: http-metrics + + persistentVolume: + # -- If true and ingester.statefulSet.enabled is true, + # Ingester will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: true + + # -- Ingester data Persistent Volume Claim annotations + annotations: {} + + # -- Ingester data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Ingester data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Ingester data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Ingester data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: exem-local-storage + + # -- Startup/liveness probes for ingesters are not recommended. + # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + startupProbe: {} + + # -- Startup/liveness probes for ingesters are not recommended. 
+ # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + livenessProbe: {} + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +ruler: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + #-ruler.configs.url: http://cortex-configs:8080 + #-ruler.alertmanager-url: http://cortex-alertmanager:8080 + -ruler.storage.type: configdb + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + # -- allow configuring rules via configmap. 
ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html + directories: {} + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + enabled: false + # -- label that the configmaps with rules are marked with + label: cortex_rules + watchMethod: null + # -- value of label that the configmaps with rules are set to + labelValue: null + # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) + folder: /tmp/rules + # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead + defaultFolderName: null + # -- If specified, the sidecar will search for rules config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
+ folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +querier: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - querier + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the querier pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +query_frontend: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - query-frontend + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +table_manager: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +configs: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + # -configs.database.migrations-dir: /migrations + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +nginx: + enabled: true + replicas: 2 + http_listen_port: 80 + config: + dnsResolver: coredns.kube-system.svc.cluster.local + # -- ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size: 20M + # -- arbitrary snippet to inject in the http { } section of the nginx config + httpSnippet: "" + # -- arbitrary snippet to inject in the top section of the nginx config + mainSnippet: "" + # -- arbitrary snippet to inject in the server { } section of the nginx config + serverSnippet: "" + setHeaders: {} + # -- (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config + auth_orgs: [] + # -- (optional) Name of basic auth secret. + # In order to use this option, a secret with htpasswd formatted contents at + # the key ".htpasswd" must exist. 
For example: + # + # apiVersion: v1 + # kind: Secret + # metadata: + # name: my-secret + # namespace: + # stringData: + # .htpasswd: | + # user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + # user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + # + # Please note that the use of basic auth will not identify organizations + # the way X-Scope-OrgID does. Thus, the use of basic auth alone will not + # prevent one tenant from viewing the metrics of another. To ensure tenants + # are scoped appropriately, explicitly set the `X-Scope-OrgID` header + # in the nginx config. Example + # setHeaders: + # X-Scope-OrgID: $remote_user + basicAuthSecretName: "" + + image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: 1.21 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: {} + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /healthz + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /healthz + port: http-metrics + readinessProbe: + httpGet: + path: /healthz + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: false + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 10 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the nginx pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + +store_gateway: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - store-gateway + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true Store-gateway will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Store-gateway data Persistent Volume Claim annotations + annotations: {} + + # -- Store-gateway data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Store-gateway data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Store-gateway data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Store-gateway data 
Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +compactor: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - compactor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true compactor will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- compactor data Persistent Volume Claim annotations + annotations: {} + + # -- compactor data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # compactor data Persistent Volume size + size: 2Gi + + # -- Subdirectory of compactor data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- compactor data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +# -- chunk caching for legacy chunk storage engine +memcached: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index read caching for legacy chunk storage engine +memcached-index-read: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index write caching for legacy chunk storage engine +memcached-index-write: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-frontend: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-index: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-metadata: + # enabled/disabled via the tags.blocks-storage-memcached boolean + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +configsdb_postgresql: + enabled: true + uri: postgres://admin@postgres/configs?sslmode=disable + auth: + password: eorbahrhkswp + existing_secret: + name: + key: diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..be38643 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.6.0 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.6.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml new file mode 100644 index 0000000..2631417 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch-headless + labels: + app: elasticsearch +spec: + clusterIP: None + selector: + app: elasticsearch + ports: + - name: transport + port: 9300 diff --git 
a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/2.service.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/2.service.yaml new file mode 100644 index 0000000..505cc5a --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/2.service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch + labels: + app: elasticsearch +spec: + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 +# nodePort: 30200 +# type: NodePort + type: ClusterIP diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml new file mode 100644 index 0000000..ee0a42d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: imxc + name: elasticsearch-config + labels: + app: elasticsearch +data: +# discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch", "elasticsearch-2.elasticsearch"] +# cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1", "elasticsearch-2"] +# ES_JAVA_OPTS: -Xms8g -Xmx8g + elasticsearch.yml: | + cluster.name: imxc-elasticsearch-cluster + network.host: ${POD_NAME} + discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch"] + cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1"] + xpack.ml.enabled: false + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.client_authentication: required + xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 + 
xpack.security.transport.filter.enabled: true + xpack.security.transport.filter.allow: _all + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.keystore.path: http.p12 + node.ml: false + cluster.routing.rebalance.enable: "all" + cluster.routing.allocation.allow_rebalance: "indices_all_active" + cluster.routing.allocation.cluster_concurrent_rebalance: 2 + cluster.routing.allocation.balance.shard: 0.3 + cluster.routing.allocation.balance.index: 0.7 + cluster.routing.allocation.balance.threshold: 1 + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: "85%" + cluster.routing.allocation.disk.watermark.high: "90%" + cluster.routing.allocation.disk.watermark.flood_stage: "95%" + thread_pool.write.queue_size: 1000 + thread_pool.write.size: 2 + ES_JAVA_OPTS: -Xms8g -Xmx8g diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml new file mode 100644 index 0000000..5a53f57 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-0 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-1 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH2 }} + 
persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-elasticsearch-cluster-2 +# labels: +# type: local +# app: elasticsearch +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.ELASTICSEARCH_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: elasticsearch-storage +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: +# - {{ .Values.global.ELASTICSEARCH_HOST3 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml new file mode 100644 index 0000000..a4ae2db --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml @@ -0,0 +1,53 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +#kind: PersistentVolumeClaim +#apiVersion: v1 +#metadata: +# namespace: imxc +# name: elasticsearch-data-elasticsearch-2 +#spec: +# accessModes: +# - ReadWriteOnce +# 
volumeMode: Filesystem +# resources: +# requests: +# storage: 30Gi +# storageClassName: elasticsearch-storage +# selector: +# matchLabels: +# type: local +# app: elasticsearch \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml new file mode 100644 index 0000000..2cbd4b8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml @@ -0,0 +1,146 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta1 +{{- end }} +kind: StatefulSet +metadata: + namespace: imxc + name: elasticsearch +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: elasticsearch +{{- end }} + serviceName: elasticsearch + replicas: 2 #3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: elasticsearch + spec: + securityContext: + fsGroup: 1000 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - elasticsearch + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: elastic-node + operator: In + values: + - "true" + initContainers: + - name: init-sysctl + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + #command: ["sysctl", "-w", "vm.max_map_count=262144"] + command: ["/bin/sh", "-c"] + args: ["sysctl -w vm.max_map_count=262144; chown -R 1000:1000 /usr/share/elasticsearch/data"] + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + containers: + - name: elasticsearch + 
resources: + requests: + cpu: 1000m + memory: 16000Mi #32000Mi + limits: + cpu: 2000m + memory: 16000Mi #32000Mi + securityContext: + privileged: true + runAsUser: 1000 + capabilities: + add: + - IPC_LOCK + - SYS_RESOURCE + image: {{ .Values.global.IMXC_IN_REGISTRY }}/elasticsearch:{{ .Values.global.ELASTICSEARCH_VERSION }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ES_JAVA_OPTS + valueFrom: + configMapKeyRef: + name: elasticsearch-config + key: ES_JAVA_OPTS + # log4j patch + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: ELASTIC_USERNAME + value: {{ .Values.global.CMOA_ES_ID }} + - name: ELASTIC_PASSWORD + value: {{ .Values.global.CMOA_ES_PW }} + readinessProbe: + httpGet: + scheme: HTTPS + path: /_cluster/health?local=true + port: 9200 + httpHeaders: + - name: Authorization + # encode base64 by elastic:elastic + value: Basic ZWxhc3RpYzplbGFzdGlj + initialDelaySeconds: 5 + ports: + - containerPort: 9200 + name: es-http + - containerPort: 9300 + name: es-transport + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + - name: elasticsearch-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-cert-certificate + mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12 + subPath: elastic-certificates.p12 + - name: es-cert-ca + mountPath: /usr/share/elasticsearch/config/elastic-stack-ca.p12 + subPath: elastic-stack-ca.p12 + - name: es-cert-http + mountPath: /usr/share/elasticsearch/config/http.p12 + subPath: http.p12 + volumes: + - name: elasticsearch-config + configMap: + name: elasticsearch-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml + - name: es-cert-certificate + secret: + secretName: es-cert + - name: es-cert-ca + secret: + secretName: es-cert + - name: es-cert-http + secret: + secretName: es-cert + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + 
accessModes: [ "ReadWriteOnce" ] + storageClassName: elasticsearch-storage + resources: + requests: + storage: 10Gi diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml new file mode 100644 index 0000000..2a24b92 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRrCrEWs79GCUPrYkFrkDBEF9uz4gIDAMNQBIIEyJUjcP339Anee6bdJls469HbsqYGgzidG41xto7ignNdZdP9LTTca+w8sN8tbVnTUZi4kQYcPSQqv+cWobi66KpgvQ7HhA/YE9K5L7wR7KEj0o61LYvucHm19hRdt788EvBy4mi8cDAr3m49NNuLUM6wyeCEKr2W2dwZFIyxFTPVv6/ef6cuHyDNLXJtjUmOIzNDL8Olqk8JGAd9bwXlizcShfmbiHHX8pAhK0u9JThFQePvCGiKA4LwzeuuwuEniznMlUQ4T/TjLjLLYcoS4vktfOJKPOgL3esjsc5hPoVgbw+ZpNCxRq1RVs/5eOBkxzXhJ7hdNELJDcMjitBfl71MlSDtMV4FhlVuhjilsuHx6URucsEE2l1V3asg4QP1PoSiACqncr2WhCcrKu0d8DztlIkCYG7D8oiAx4nEzsm0xmOhIcigHw6GP4MNeCieJCgAwLkJf1m73IYcxyaKsJAc57jfs9ue62KkVHL2NxNRjTps2j0Cl5NJQRE4CTkieU0etsNS1nJEwiJunVTyHXAa53MF6j40awEqs2Ko4gQENPpuQc599yJb+ZTHfHPe8bpfrmnxiEAaeiABu+OVH9bdLK5gtCyD5vXGZKVtHbyR+0+UlBggw/horFQIP+x7SKO53+ho0iCnYyQK52kJiv93JNgStGHpxf1SkPTtWHOraR2qSZTX6F7vjBtIq3Y6ocb6yo/jMNhzk3spHdz+F99S6uV3NLmDfX2vJmu1YSaPwaNZGDggcFI/g2S5ylBWyHpk2rB5gtklUIQEWxFFvbFOp37ffcdC0mZ6SgpOxj+IxuVLqTvyDLjrfteEvfjRAFXsT8E4XikC8QKjQ+KAwDYETidOiYB0/ByCh7t1KbcKJWU8XYxqzukX88CyVtO9Lp/f97x3ycvaF1UfzLBrm/bnTa0jPEP2/OdzpbjQJcEGX64+QY92k38zjPe4tedUz5H/C9aw8Q8r/DSxUhn2sdDXssR9jytITLLOJHDJX7XCfZxtoW60bwRm5MyXc4bJmjZT2BgxTWIVokaOhk0IZwpbC/oxh1QkaHBioP6+slASXg8Xu9l+mACevb1b9RvpN+fhurW2wOHl4Kul775BCohuTtiqKAce8KEACwncwYz+ZfcPTkbLRy6+p6NI3zNWpZE+iFlPtLh+2+T/QQHEfKTNUxcXLt8WCMOZuCe776T41nY8UhbUQJKqlEvom3MzCcsvFBoahlpjv+rg9/Ay7ESMil49e2x3qbD2929X0BHz//RcvPO5fvSEK/tC2uHzWzqHf0Za
RwtO19Z95Uv3GjGNF0SO8qri830LfJ+ctjk320qLyZmxA9QgPoI2oMHSxkaX1fgVeiN9coBM8yJbPK8ZdOOg4abnYOhqrTJXaoSFo+SYyAVZoTiQIIk/JScL5Qcw9IJw6sSKmOdChy2spYQKeo1NU9ecLD8YRBqRP0EET7e7NDPKlIWQ1vB5y2hokyL7bxvbGgzqQBAyo9wKJ3v1g4IYEWA9mluvQapOMVEHBYh6wv2nTJpE9EqMxpYQBU1w+vgX0EUgZDEOBkbvd5wubAeERt0mJqjea6vxWJIbeqMVIIoJSZEDaPE5qVNYaosoc8yvAZ9+U3lZlZObHzHEAIUx/2pP/jFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE4MTk0NzgwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFP43u2ii0k7JTUfInMhUBwjWZrS/AgMAw1CAggdItHB4SBc5KdDVc8eXuF8Ex1WP/Y2wz76PoNNpYm2LeIVozsp5c/2RDN2KqhcvhTihlY44esqWWVCOx+OTwmAPFwzZSrMaOYpcOP3fRWaHJLw98cK8a1ZuNv3eXWecf333TrsvU/bpT3v0KNO915qnSbtNwlvXaOMm6jbw6eBnkB7i6jxA7kgVAW6soa3ZHOrV78quBSbAjXZddHsI8x3MS4rxdvkp6GHet22/fQxjxz8UlQEDqzQgK7F4RqULRJeU//JID7VJqfbHRHfnYsKszsirhWKeJsxLVhG1VU/zRgxs0C35NfQeR/o7jmFpE7CCvvC0Rea2pybNojb51HLvyycXtpGn0gAdTBVNnwK1X58uSDWH7jM61uX9f+/gcDZqlUj6UVc6mzqxAgzDtf6B32G0VQq2szaJjbRVEVXhCAOIdVj6pRpI3l3gRv8OkNAWsGwYDMjeFxnrEpw1AQkEj7FRgI6iNOxEfUhOVYIEsflGTUdcd+K+zlCCHAJoMzbqiwPyHHgvLOp04A7fog+H3/cn6Tdmrp/J7TxpaW1ZwwcHtTRLoq0F77Sj8XJule3CzaDtg6IBen/Yo7H9hhK3ORodlGjJYA285dHAd1mtqmHmoWeDNoVrlVyymge78yXGmlFsBWF83VUChRx+9noF3Zhz+QMPBNsKHk4TM9yRHiWpMZIdkEZKq+obCPU2PmC21wnWx13nhb88gaNyBjHxFsGE91SgEyQh/cPhi01Y7+yNYQvYOXJe3EQ6oqFCBkPUnrbAMiHDP//AVN/tUrgVbmpIclfFprP2YIRcfGa7qch48RFbmhnX5N/OYLaPnNYdbxOiwZ0f/KIpDKWS67kS2N+jDKWs/SCLs2g89q1z2EGvbVwKMD6Vl559EZxAfNRv+eZu0MvTejEkuykIHJpXCyP+8EphUyWW9Cqll1ux4rXMUDkgl5sh1WgSoIEASX2j5TJ3fIh0nBkjAkBi0n2BINZgVWKj9U1zHNdRF67Eb+97lUuY6JIkbFhLSgZiIZqnI9bnW8OKUJFtvVtlSKG4xqdOeAroB8GLw2iR/GjF2Dvy4rIZo+qeTCIN+bm+iFkCri7L2K0/KR25h7bAtXwBxwMct5F4A1vltlLs408efMRJ7dg3iqMGhRyXdwxKexWJLbp02uJQVU9/ogYeLfSiIZEm25qjEMQZqRpQpwLaH5JB9oLKqdLEdeuxOfqb6weHDOtITlFHToeRNzIEmbiT9gbdpMwKTxs/rtwMHgGU6kIJmIFgnw2gauKvpiIuDCY79JpSNipsicvvLTIa4cc8sZCCllZ1wAmbNDsCH6p0bh8CooMjGf2vUbRClSe9+R19/lRMFGSp4N6fElW7MxNw85xpkFjG0s053fvIJmfPhxVqUHMP3fFQv0DUvvQNvNTsRGdDjohkC0095v9EWy7n9Frv2wIM2G7uVHvr
lgkQfPK2JsYZKsUE0KXa4HUQptWL71kp7RQSmOmXFzsthjYVXu/pfXA+u+PAtHvQpo1nTPreXn3UZqiEiQmNkmMPLAYzpIi35tjNewfw5XwDj77pqH5OFcMZDTKbiInV1LuvFlKxCEYh4gvTThC0XTsrsiHgldtNcw9ZB017uPW9AAqbj2IB0d5b0ZB3yMZ67uzt1pretcxmEfSoA64QWOC9lBYp4DVE9QxcCnsSgibWreqpdJHmX5MR4umwIb6WaM1pJdCY1bW4tO3ZVT4DA/4ry7jqxUH4AcZRNK0zYR6DAtZndB7LTJhT+8d5EBtmAHzC5HT9KLmHV6mAG1QLMlwhNXmtM0YCJsKxcZo+xLBy/2cHl41EU4ACiuEq1JrM5j9fQk+hmJHT+JB0aqv+kvdxGmgBuVWGHQBtNTV6TYeLzqzDpIl9uXi3qFKFBuTQOska2zAMv7gLOe79w1cVb/SJKdcYjWtLR0v6wfaRgVeBwLvTvh7nNXhXRqKfQKe3e2Tjgq4nV4kOQHI21WDKGSd4ONyyvXGMwNzRgcZwpDFAcvshZATwaBtAo4JWi6D3vJB6H1PHRtyqHjErKkPazoZMjR2sZI8S4BMo4R5fa1ZztZO4p2lJYUIAQHj872UdGXHTXgyZKU8t/ifiVfxon5UtZJRi0Xq5OMdN//Qtq2kVwQxntf0eWsygkKMtNr1XLzu0TAMUMItnohdQWUw5w8UeXYOAYfZFqZEhKfcwkJsfq1q56ptzVBI3T2hDFM7xuVFNn5y+FCTx9pB9FCbln/3ZlKuUiTH/eLMKdQYGkRX4X0qzkx3YqAn6jDLQPEG3Rz0JP53T43uLxGpqa8+jn1XIUCNj50mqZGiah7bdo1qsDHbFWYCe7uoOjPapontpaoEQaZog1INqBNerS19a+i4S0/uAsGApykwUhk/zGfr9UudpKJWd7AznlF3+yfZfk/9mCSajBpoWafCIWmOvxJD77L86YAs9STuhWUGQvL2rxPf2uyS4WAi2+DgbdrGTSiwNB/1YX8iHp/cw6DA+MCEwCQYFKw4DAhoFAAQUSvLiFrAQlmfgL3Cewez5Fw2+0okEFH+RyXvcJHVaYbaqjejrXkgUS0JsAgMBhqA= + elastic-stack-ca.p12: 
MIIJ2wIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCBWEGCSqGSIb3DQEHAaCCBVIEggVOMIIFSjCCBUYGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBTQSr5nf5M77CSAHwj38PF//hiFVgIDAMNQBIIEyBrOipz1FxDRF9VG/4bMmue7Dt+Qm37ySQ/ZfV3hFTg6xwjEcHje6hvhzQtFeWppCvd4+7U/MG8G5xL0vfV5GzX1RhVlpgYRfClqMZo3URqBNu6Y5t3sum+X37zbXQ1GI6wo3YURStZkDHlVtObZB667qqj5rO4fIajzRalaxTFda8aS2xAmQklMcCEXASsO5j0+ufVKiOiG2SIEV2LjjYlUymP7d9+LAZ2I6vR+k/jo2oNoPeq0v68qFd9aOB2ojI9Q/PDFA7Nj1kKMK7KjpxGN5/Ocfr8qrxF1mviA6rPdl8GV3WCFMFKcJER4fRmskWGNE/AdwU3laXvJux/qz4rjiYoJX+5rSyXBDxdznaFiSyN1LYkFJ+nao6HSAmPPyfEPVPRICc6XHMUM4BZOVlJO49M1xg7NFQUtkyVm8+ooDwXCiGEUHDZNw+hCcuUewp0ZXki695D0tESnzi3BE56w7CRySeaNR8psAtL74IUtov9I66GlBEI7HSbyLTT9Fa7+o+ElJWnFqIyW8WzNF3T5fvRv2LfKjYO5KiISlOM03KlETWE1F60TZqW3EbP9WjLhRnovFcJVsNyha+wDVTu44DAylMX4Oh2xKYm2YW+Oi0aeCFmJbDp/TlxYhm5ACYUxma6CVxbEgHkxwjWyFfiNQp2MBL/5HFJGxuny2lVnN8yUSCvDdnOlVTB36/EByY/oA8S+GF/QRYd3PMew56s7aBgPt8mhncN5Cdm+GCD/Nb/ibcuTId9HAaT6o3wMsc7bYusjHGCjFbz9fEdU2MdpLJO+FXVM9E1sEKoTpPLeJDh2a9RUWJQPUCLu8MgEdiJohtEpOtvM7y5+XbuAkYaDsBw3ym5M/kwovN09X1m5x5qM0QSRIVKHf1qo6wo68VMeVQDEBNxJ5/tuZ11qE3siGRfwDnUkCpb9H54+w3zaScPHGAdwplYYwaqnFMwi8nFMtjZvGOLT2wqPLPnKVeQGt4TCVWPXuB4kYnmbTWoJbUT5Wpurcnyn8l6uzLmypCD4k8YiQoDb1b9HIFUAypn580KIUF19eCSGeIHl4hbmusuISxQ1qXk7Ijbj7PiVtMKy5h8rG/c57KJvfvnMQy9hauM5kcZmlTUvrHDw+7cUFB96/wXbvqmcPKGKutgXRqHcTYyBOPEJnSUMBIM2r59wgFjlMuQLrJurzwzox/IEKu/KMilIBDp4k+MHz6NrINWfbV7xa6yAja1kWyvUmwYjCHhlXZmhCb2fmhP1lsnN4BNAkDsdfxHBRCBISy6fuHSY+c4RsokxZ4RomHhVvJsEY/AE4DCvVXDunY8t4ARrQCqXYso3+kVjm6+aelKk+KgyLZ3St0eAIl/Y2xqEXgh0wHGrx3CLZqGqq864f5MmrxiytmlSzHP4RSad20drsN3VchaJZkyrGbKEs6ZJDU2dq5NiC5unqx5tLw6XNRTydIC2PaiVl9m3GLUCh6hQSRJnvcXrqOd8a9K1uV5OoA3TRdc2V5lyxWRIJsdK5KfiAiTsNeM+Tt+Dh2pZjt2l2h4n4BjgYApxG8u10BP1iZ1e1OsCRgLGbgiuXtXrlrjwvJzrB5i11oy9mt3vqgtbjAciQpsQYGGfnVqyGXfEc55hIYWClNAFZDE4MBMGCSqGSIb3DQEJFDEGHgQAYwBhMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE3OTU1MTUwggQUBgkqhkiG9w0BBwagggQFMIIEAQIBADCCA/oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFEVjuzIvhFF9BzWGr3Ee4cw/mLcqAgMAw1CA
ggPAwroH+zLRt2Jtb8IWeOaIbXAv4sVGUljreWkJE8dkoXNcEQpATEt5H7L4uwnDsevLi1yfWtUDN1OxM8gb7iR4Jysrd+8uM1r0nn9YStz/I3qhN9Fb6yAb+ENTCzwo/oAnyDBM/lXR9fL0EPHRfsDmK+6kC+hZ4AZIao+1oWRD0Bu970yK6gwv7TIRCsS/RBZfC/d4Slz1+IQChiWS4ttTzxK/IuhaFbia0JYtUpjmMGMBQwYRyvITgYpOIct39Il/mabQ4BA1/wk7Oecfe3RHzIfM49AxJtwKppfVfaRJjtK1aoO/GKS6CZuvIIX8q3Mt32OEaoRN9FJM9EkUkKCcYhtRfq0/8MTO97MbrcKeO8XICn8vZwOMM7k7IFtCq44/3QBXa9fpc2BFMVYOoQ22W2ZuMNMRp6OYc6Da1BG4Ik9mt1T4k9NkvfrhpNceR27v6Q0pZNUTN26aPr11/SfS/IZmLGXF7cGAfxITMOQwK2ig6qivXzvwLxfnyW4aHF7K/jL59kDg9Vf9zKmlvPJpHSEWv53U9SFYvvrMISd6E8np0bHRM5p49mgH/KXGauRRaLWUxlBwrhjeZRimTF9x//a0luGf5tIW8ymi32wn8LNiu7fbnkldnivfgWVmktNrPMH+70HNlCWkfaNibSHpzyDQRTzg9PjHEcFH+pQAXCc+A8y8FSvlT+nx9dpXXRK5pqbrGnWyrm5D3oY1ceO0E85R9Fx4Ss0f+mMBtNDYpz7zS5BSX36MNn0gm6MkhlOVbbcAob4WbZAEM7zaiV1ilLegXPZYPCGQydN02Q+lJ7HHZ18T4mzTrjF6M1PFIx31cR1r0ZtJhkCrOWdlTrmovvYYEgEStsiE3pi6dW4v1NgcJVevpnJJ//vpGXasH9Ue/ZNdk1tj/h7cQ/qbKlmvrcuH/UQ969RsNX+K3B1xeYnfbV88BXqFLuqhuWy38wwvBvKO37vq+ioPNIjwaIyCVzoF9/MAx2aNOdk/x04mSNVYh5q0ZKv+3JC3W2vJxV2aonc/ybFgi2GZz2erVYNZTSXz+bEefx8QWzcW6/zr437jh/peQRyQ92PsN+eZV9GB2lrwmF7K2579vNQoVcpzTvTFf+eZZhF8u/1HZW4uFHRUyqE3rHyOukSFukD7XWnFL1yUcWw/SGNIm1HNZD3nXjqcwdAIXl7OvqdO0z/Qt2bny6KpOSJqjMUjB5AX5/yt2xlZBDhlsoGtRfbSWefGf7qTdpg2T9+ClMb7vS1dLzrGRzNgGc7KO2IQdkNcfj+1MD4wITAJBgUrDgMCGgUABBSoZ3hv7XnZag72Gq3IDQUfHtup5gQUHZH4AQTUUCeOS0WnPOdFYNvm1KUCAwGGoA== + http.p12: 
MIINZwIBAzCCDSAGCSqGSIb3DQEHAaCCDREEgg0NMIINCTCCBWUGCSqGSIb3DQEHAaCCBVYEggVSMIIFTjCCBUoGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRl7KAO2Y5ZolA3Si0i+pNdXpn42AIDAMNQBIIEyE9fBFRMMy358/KJQcAD9Ts0Xs0TR0UEl/an+IaNTz/9doU6Es6P22roJUK8j4l09I8ptGGKYdeGzrVBzWEjPhGAZ3EXZPHi2Sr/QKbaiWUnYvqqbPVoWNLukrPvK5NpEyPO2ulfxXN46wHzQMnk5l+BjR4wzqKquxgSzacXRJCqznVj59shjLoTK9FtJ3KVEl+JfukcAh/3EqkP7PRAXrPeQ5UcvYbYMZgxw8xHYg/sdKqyHBxwQqNtvGlfGHQ6jyb4/CS2vu0ZehGHQoMgmry2pvNMjA9ypSVWRGspcrdcQOJNgYtHmBiBScoURLB+9KJX2ivY8zJFI5e8Hb48sLASkp4HQemBWMQTukSnlgddsAtIKgpoRZWpcJ7PunHuWXAKZPCMH6uF14G71/lhluRjjy5GEnkKhKkKnlX15kmLmylTZJVdMbMRnsGK7exsVS8ot7sYJ9EMIvKJUqKf/RmZvUxZqlGp1oy3Uo5JgBU5MF61wnkad+L1UJsB2ZzPV0S/jYKPFVzBsWXj9IH74D02TcQz774+FQqAXlVLlpglmlnMwOU3IboKOH2Z4LIj7Kx7wfZZMi3/sQbYJM2PWCd8OS/keDf53ZwMKNxWPh1ZB7kX4mqhmMHdNgRblcWXP3LtWKck31Vq1UdGfK4/T/nudD1ve15NPUP1DvcVsDOWnRF4s3IDXZwXWqvag+hz0zVyB/T0X1XkqrPtBNX/o5qeTDP30W2GVdGL6SIlgZHaqqNuamHlhGra43ExKTwRPBsskTrziC2fb/JeqXxJBES/YufiomXw14BnQUpyBfVeV3cDDEZUnfu7lJz19jS+2aTtA6v9Qnps+q0rNnLa54JLf9bWlw4RomSWcJCqkkW/EG0AdTKrqNFYPZVZTLvt+4B8ehWrUWas8MK5jAXeTklr0ao5acGOKWip1wmqIRKRAIT2OBbs9jCmigb2xJNDK4RdUtDYsJeltJ69DvnG7bmTLjfsOQcVIaI40k91N8nnda9+/6BdKFDQtMDB6efGkciWp9ce24uGUzKszD7CmKTlCJiqn/V2bbOKGdk4Tafy4B2HzeaX+fMFjpWu01UMaJJrvYbAnXww1Yg2IjbwdAMTv7z8zPIJ0a+drouylUfvKKeun6BnLe0fR+XbRRs77Rengb30c1plozEFHZjzmQ10uVQSh1wWURJnVSru6b1pyVI+KR3WZHB4vgDx+BDlQjxCk53+Hxm5wv8SgpvNxVkepPVF8ucut9FkGNHov1gyatlEKSzYlrFt0mFQWg20rKMrkB6pEDO8f5W2InR3znO15NTbw/l3BXYGOe1lS0tHljc5zJkmMTdVrJnFEd2RqNPNmFWEn+1bm4NeAr6QEY9fiyBCMWBHEELTfHtu4iS37D1cBEKudpCszaWJiPgEeDu75+IuXa/guZdxWJj/ktDfZQJpp9ork2QScgu31l7QdGfC24C2E6kQp4UHZ3k7wXSTUt61bdmK7BHqjiz3HuP76phzd7nZxwLCpEg8fhtwhNgPx3IrU1B4JX40Wzsy1Tz/8oIcvjykDmI967chWtw/WSschamGBelNt+TV1gVKoLlMpL9QxFcAqXhEC6Nr9nXRZRJAIRun3Vj+EabZoR2YsdghDE9boTE8MBcGCSqGSIb3DQEJFDEKHggAaAB0AHQAcDAhBgkqhkiG9w0BCRUxFAQSVGltZSAxNjUzOTcyMDczODY4MIIHnAYJKoZIhvcNAQcGoIIHjTCCB4kCAQAwggeCBgkqhkiG9w0BBwEwKQYKKoZIhvcNAQwBBjAbBBRmhTM5a6OsdDd4LLR/07U/28/dqgID
AMNQgIIHSCCLUDdxl9rcX65CAYiQD1mrnoDJe+c8hWww8KI+RD1/3U8skUZ+NHjf2cjCrDQdtVZcycc37lkJ4HEU0keMdVE7I9tja81EfQclnZAUgx/zzLQqVV9qc1AcKX0pzUczLewoQZdXQHdpXh0u8Hf4xFeYM3EAGxB0mUYGwZXWSxYSdaHmxTgeftqNHF6tudt0vpPgq9Rbqp7zP8z48VUOSUkbNTXZOgNVpMgs/yKivvURdWBwJMkpOs/daeR+QbOLkhrhTtT8FjwFUlpnQ//8i7UsBBJKcEKvlrfBEDWcIGw8M6oAssoPsCGyXnsP7ZCVBDBgv941mBTJ9Z9vMoKPpr9jZzSVJrU2+DDuxkfSy1KL0vUvZm5PGSiZA72OpRZkNi8ZUbJTRKf71R+hsCtX/ZUQtMlGCX50XUEQl44cvyX32XQb2VlyGvWu0rqgEVS+QZbuWJoZBZAedhzHvnfGiIsnn2PhRyKBvALyGcWAgK0XvC26WF676g2oMk8sjBrp8saPDvMXj06XmD6746i5KC52gLiRAcwlT4zJoA0OB5jYgxXv+/GP9iXNIK578cCGpBes28b7R+hLDBCc/fMv1jMhKWPVXWJZ6VkcpUgH73uxFl43guTZzJfHI1kMF1+PbOviWPdlSj1D44ajloMJP5FXubIfYEIqV19BdU42ZXZ8ISIZYTAj9OhNCUkkTjjGH2VhFz/FjZDxdk9m/Sw+du8dg1v0+6XIMScjuutbLxxol8Dx1yfRSgZZGN+D3vi0hW1OgcpnUhVI/x48LjdWm1IA0XWOzFiJAe98BiL0roTsUk0pgyujzvLcwDFGP9hnQ0YLdCy22UsQ39hRyQzwGAVO8O49bU8sgNy75+4++8Z3pqI91hdoHyzNMSx6fJn/Qd6UcAdTF0divh17q5bZi+x3D7AQEvh5NwePD0HIqBZexT0yNTVTHragJZUetI5FZgE1cZrfchckP/Ub5jdn3e/Cvu8J/yZFAM8glJvO1D+4BZ+/MVAw3AkO7kLhGeXMXr9s9+A/uPlznoC6b9bpjj3X46bFz7dPIYC0aeya87vISA0/5VPkkUZ+U6A9nLkCIcl5XQElMjrzidFJyBmtxHXLrAu5yiWorl3KVOf9QOrKrZt1UrNihIaSIq/46jI5yBQX6LV7fUBrZKe/oMbuf6W0LliNJbKSwZi0RRHo0jBPotUiOsn1qmnh+hZp6rwi1KGOsCAPSMSGnURwoXAdTUmAyPriDjDBKjm2EiDZJ9T3XgNDHVU24SqKjsSoByrD4FcVyqFAl3w0CaSNXloZswE0UqGKoQUy6Up0ceWoeHYfA/FJyaGfkFGRkmYun+wUJZvhpoLv6bn377CziWTSc0o3nl+UZ4pTsRJOlG0FOxzWApjSd8bPIdezPxak2DM0qj6aiUocfEBMLnFn4Sjj1vVFmIGPNXiOPlJF0Ef99I5Gno3YAd4ZHBqpkeUq7+bWur+xhv5zsXs5ARK6TVOVqlMPiKRpDX7lEQoya++U6HIj6zb7arSZivM5YrZeqHFKK4gpORvpg6icApQCBniDgmNxZJFobgzvIwKTABJjoivHs4zIIw6TCjbz38GEFdzbsUuCXQo3tFWaxgiGkxtLnjYr0PTIxFdBfQ5dkRkkxLvUg7uR1uP9IcmO/8QzzyLeSA+I+teZME8QCzui6CY/lhIfjxJimawejCJx33nS9uXNibQ0my41SmXRDGVgiH6el8veIbEHU9RY+elVR6eqlemCuIHfU8QNPNbe7Gzqaaoccd2VUY3PXNHxU87DC7Nttvn99Ow5zxZ8xZUQVfLFntS9d2hgKp8gJ9lgVKzEuYCiL59wuxbNtnAb8mET0Buw24JeQew9e8DdYL2vDLhQz+IqPXKAhlf7BSpPyQTOeaba657CNmkzdiNk3RHGeTRrq4c3/nl1M+ZsPwf8WxoTcmu+W0Y7/j9nps8r+fKlNB23hOEIWZ4KN+Y4qZRKltTARhqmdjLI
hUtWh4D49eTe5sS3MqzsZJJwsEHPPOvZKvOG5UU3jXMg9R4F8CaYgx/M4ClwIIlHvcdW7R7sXke9E/qccIG3jQ5b/mgHCk3pVkAyrRWfBZqXxlfWn+cfzVALtUXWePwhN8+i3CQbjLLOgE6yH3/rBfXQQVYHwrZqoyFchDwlFF5FtF5GThnj04kvhZbq0EcF4lbiULAOiBkJong4Op287QYgq4W8szOn9F2m/4M2XNaI3X7w67GADFHs5TtPXjWx1l6kKIwMM2pcpltXblqgH087payQHx1LnCpztxcxmeoFb3owvwKWmQpV0Gh6CIKfa7hqwCsNggOcKEQWwRJtADEXzPhRYG0mPelWLQMdLLaEzUqh9HElXu3awKazlHa1HkV0nywgldm23DPCKj5Fi6hux7vl7vt8K0Q4KA8Xoys4Pw43eRi9puQM3jOJgxX8Q/MsABHHxPBa94bOsRLFUa/Td70xbHpOrCCp64M7cm6kDKAwPjAhMAkGBSsOAwIaBQAEFEi1rtKgyohIpB9yF4t2L1CpwF+ABBSDiyukmk2pIV5XfqW5AtbEC9LvtQIDAYag +kind: Secret +metadata: + creationTimestamp: null + name: es-cert + namespace: imxc diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml new file mode 100644 index 0000000..d2bff8e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..7b0bd6d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/elasticsearch/values.yaml @@ -0,0 +1,68 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/Chart.yaml new file mode 100644 index 0000000..61a7b7f --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-manager +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml new file mode 100644 index 0000000..b20900d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-manager + namespace: imxc +spec: + type: NodePort + ports: + - protocol: TCP + port: 80 + nodePort : 32090 + targetPort: 80 + selector: + app: kafka-manager diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml new file mode 100644 index 0000000..4edcf32 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-manager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-manager + template: + metadata: + labels: + app: kafka-manager + spec: + containers: + - name: kafka-manager + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-manager:{{ 
.Values.global.KAFKA_MANAGER_VERSION }} + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 200m + memory: 1000Mi + ports: + - containerPort: 80 + env: + - name: ZK_HOSTS + value: zookeeper:2181 + command: + - ./bin/kafka-manager + - -Dhttp.port=80 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/values.yaml new file mode 100644 index 0000000..b5532cd --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka-manager/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/1.broker-config.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/1.broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/1.broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? 
-ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? 
-eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + 
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + 
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + 
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/2.dns.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..8ffb3f8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: imxc +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: imxc +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml 
b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml new file mode 100644 index 0000000..6f67ab4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-1 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-2 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +# On-prem/워커노드 두개/브로커 두개 환경에서 발생할 수 있는 affinity 충돌때문에 주석처리 +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-kafka-cluster-3 +# labels: +# type: local +# app: kafka +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.IMXC_KAFKA_PV_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: kafka-broker +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: + # - {{ .Values.global.IMXC_KAFKA_HOST3 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/5.kafka.yaml 
b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..1982584 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: imxc +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 2 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + annotations: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 6000Mi + limits: + # This limit was intentionally set low as a reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 
500m + memory: 10000Mi + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "rm -rf /var/lib/kafka/data/*;kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] +# readinessProbe: +# tcpSocket: +# port: 9092 +# timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: kafka-broker + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/6.outside.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..c2d8170 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,89 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + 
targetPort: 9094 + port: 32401 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + selector: + app: kafka diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/kafka/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/values.yaml new file mode 100644 index 0000000..cb0e677 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/kafka/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/Chart.yaml new file mode 100644 index 0000000..d602e29 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: postgres +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml new file mode 100644 index 0000000..95c8bda --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + namespace: imxc + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: admin + POSTGRES_PASSWORD: eorbahrhkswp diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml new file mode 100644 index 0000000..dfbd714 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml @@ -0,0 +1,38 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "{{ .Values.global.IMXC_POSTGRES_PV_PATH }}" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: 
+ - {{ .Values.global.affinity_value1 }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + namespace: imxc + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml new file mode 100644 index 0000000..31e90a2 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: imxc + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + # nodePort: 5432 + selector: + app: postgres diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml new file mode 100644 index 0000000..14993e8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml @@ -0,0 +1,45 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: postgres + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: postgres +{{- end }} + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.global.IMXC_IN_REGISTRY }}/postgres:{{ .Values.global.POSTGRES_VERSION }} + resources: + requests: + cpu: 100m + memory: 2000Mi + limits: + cpu: 300m + memory: 2000Mi + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + args: 
["-c","max_connections=1000","-c","shared_buffers=512MB","-c","deadlock_timeout=5s","-c","statement_timeout=15s","-c","idle_in_transaction_session_timeout=60s"] + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/postgres/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/values.yaml new file mode 100644 index 0000000..9972ab8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/postgres/values.yaml @@ -0,0 +1,68 @@ +# Default values for postgres. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.lock b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.lock new file mode 100644 index 0000000..21ff14f --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.8.0 +digest: sha256:3e342a25057f87853e52d83e1d14e6d8727c15fd85aaae22e7594489cc129f15 +generated: "2021-08-09T15:49:41.56962208Z" diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..3b08f9c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.22 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 
1.x.x +description: Open source message broker software that implements the Advanced Message + Queuing Protocol (AMQP) +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.20.5 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/README.md b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/README.md new file mode 100644 index 0000000..9b26b09 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/README.md @@ -0,0 +1,566 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. 
The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | ---------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.8.21-debian-10-r13` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm 
capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `[]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `[]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
| `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). 
| `false` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.servers` | List of LDAP servers hostnames | `[]` | +| `ldap.port` | LDAP servers port | `389` | +| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. 
| `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `{}` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | +| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | +| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain 
scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| ----------------------- | --------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------- | --------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.volumes` | Additional volumes without creating PVC | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.port` | Amqp port | `5672` | +| `service.portName` | Amqp service port name | `amqp` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` | +| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.distPortName` | Erlang distribution service port name | `dist` | +| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` | +| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` | +| `service.metricsPort` | RabbitMQ Prometheues metrics port | `9419` | +| `service.metricsPortName` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` | +| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` | +| `service.epmdPortName` | EPMD Discovery service port name | `epmd` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. 
Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.certManager` | Set this to true in order to add the corresponding annotations for cert-manager | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
| `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource 
should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r172` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. 
+ +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. + +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within in the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. 
An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. 
If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +For a faster resyncronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. 
When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. 
`metrics.port` is renamed to `service.metricsPort`
+ +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..344c403 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.8.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.8.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/README.md b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..054e51f --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/README.md @@ -0,0 +1,327 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` 
Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default 
(dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..ae45d5e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for policy. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..f905f20 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. 
+ - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..60b84a7 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..1e5bba9 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..18d9813 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (not $existingSecretValue) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/default-values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/NOTES.txt b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..24ffa89 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,167 @@ +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }} + +To access for outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "rabbitmq.validateValues" . -}} + +{{- $requiredPassword := list -}} +{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..6b46b23 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . 
-}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
+*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers", + "ldap.port", and "ldap. user_dn_pattern" are mandatory. 
Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]="lmy-ldap-server" \ + --set ldap.port="389" \ + --set user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Altenatively, user an absolute value for the memory memory high watermark : + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not .Values.ingress.certManager) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Relay on cert-manager to create it by setting `ingress.certManager=true` + - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. 
+ Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/configuration.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..5ba6b72 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration}} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/ingress.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..db74e50 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pdb.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..bf06b66 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pv.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pv.yaml new file mode 100644 index 0000000..d0f8bdd --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pv.yaml @@ -0,0 +1,22 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: rabbitmq-pv + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: {{ .Values.global.RABBITMQ_PATH }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pvc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pvc.yaml new file mode 100644 index 0000000..c677752 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/pvc.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: rabbitmq-pvc + namespace: imxc + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/role.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/secrets.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..4d14e4e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..562fde9 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +secrets: + - name: {{ include "rabbitmq.fullname" . }} +{{- end }} + diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..46b9040 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..45abd14 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + 
value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if .Values.loadDefinition.enabled }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "yes" + - name: RABBITMQ_SECURE_PASSWORD + value: "no" + {{- else }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- end }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + - name: stomp + containerPort: 61613 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) 
| nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + postStart: + exec: + command: + - /bin/bash + - -ec + - | + until rabbitmqctl cluster_status >/dev/null; do + echo "Waiting for cluster readiness..." + sleep 5 + done + rabbitmq-queues rebalance "all" + {{- end }} + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + resources: + requests: + memory: "500Mi" + cpu: "150m" + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..4ed26cc --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.portName }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..2b4c224 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.epmdNodePort))) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.distNodePort))) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.managerNodePort))) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.metricsNodePort))) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..b6a6078 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,74 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . )}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }}-certs + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "rabbitmq.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.schema.json b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" 
+ } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..5b74e6c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/rabbitmq/values.yaml @@ -0,0 +1,1151 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +## @section RabitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry RabbitMQ image registry +## @param image.repository 
RabbitMQ image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: rabbitmq + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + +## @section Common parameters + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: 
+ - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "eorbahrhkswp" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "pf6t82zTrqY9iaupUmkPOJxPXjmjiNEd" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
+ ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_stomp" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. 
+## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: false + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## +configuration: |- + {{- if not .Values.loadDefinition.enabled -}} + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = eorbahrhkswp + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. 
+## advancedConfiguration: |- +## [{ +## rabbitmq_auth_backend_ldap, +## [{ +## ssl_options, +## [{ +## verify, verify_none +## }, { +## fail_if_no_peer_cert, +## false +## }] +## ]} +## }]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.servers List of LDAP servers hostnames + ## + servers: [] + ## @param ldap.port LDAP servers port + ## + port: "389" + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter + ## + enabled: false + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] + +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. 
+## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param 
customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + 
create: true + +## @section Persistence parameters + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "rabbitmq" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "rabbitmq-pvc" + + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 5Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + +## @section Exposure parameters + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + # type: NodePort + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## + managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheus metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheus metrics service port name + ## + metricsPortName: metrics + + ## 
@param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: + - name: stomp + port: 61613 + targetPort: 61613 + #nodePort: 31613 + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Ingress annotations + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Relay on cert-manager to create it by setting `ingress.certManager=true` + ## - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## @param ingress.certManager Set this to true in order to add the corresponding annotations for cert-manager + ## to generate a TLS secret for the ingress record + ## + certManager: false + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by 
Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param 
metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Specify Metric Relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + 
additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: 10.10.31.243:5000/cmoa3 # docker.io + repository: bitnami-shell # bitnami/bitnami-shell + tag: 10-debian-10-r175 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## 
Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.lock b/ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.lock new file mode 100644 index 0000000..ee0ecb7 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.3.3 +digest: sha256:264db18c8d0962b5c4340840f62306f45fe8d2c1c8999dd41c0f2d62fc93a220 +generated: "2021-01-15T00:05:10.125742807Z" diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.yaml new file mode 100644 index 0000000..6924d59 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.7.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/README.md b/ansible/01_old/roles/test/files/02-base/base/charts/redis/README.md new file mode 100644 index 0000000..3befa8c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/README.md @@ -0,0 +1,707 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. 
+While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable 
NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | Additional 
Environment Variables Secret passed to the master's stateful set | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | +| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the slave's stateful set set | `[]` | +| `masslaveter.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | +| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` | 
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} | +| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| 
`sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.preExecCmds` | Text to inset into the startup script immediately prior to `sentinel.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change RedisTM version + +To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - RedisTM Master service: Points to the master, where read-write operations can be performed + - RedisTM Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed: + + - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar: + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for RedisTM you need to create a secret containing the password. + +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. +- `tls.certCAFilename`: CA Certificate filename. No defaults. 
+ +For example: + +First, create the secret with the certificates files: + +```console +kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +``` + +Then, use the following parameters: + +```console +tls.enabled="true" +tls.certificatesSecret="certificates-tls-secret" +tls.certFilename="cert.pem" +tls.certKeyFilename="cert.key" +tls.certCAFilename="ca.pem" +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS option to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example: + +You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or provide the following values under `metrics.extraArgs` for TLS client authentication: + +```console +tls-client-key-file +tls-client-cert-file +tls-ca-cert-file +``` + +### Host Kernel Settings + +RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. 
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: + +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge pages. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis +``` + +## Backup and restore + +### Backup + +To perform a backup you will need to connect to one of the nodes and execute: + +```bash +$ kubectl exec -it my-redis-master-0 bash + +$ redis-cli +127.0.0.1:6379> auth your_current_redis_password +OK +127.0.0.1:6379> save +OK +``` + +Then you will need to get the created dump file from the redis node: + +```bash +$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis +``` + +### Restore + +To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. + +Follow these steps: + +- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step. 
+ +```yaml +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly no + # Disable RDB persistence, AOF persistence already enabled. + save "" +``` + +- Start the new cluster to create the PVCs. + +For example: + +```bash +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +- Now that the PVCs were created, stop it and copy the `dump.rdb` on the persisted data by using a helper pod. + +``` +$ helm delete new-redis + +$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides=' +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "redisvolpod" + }, + "spec": { + "containers": [{ + "command": [ + "tail", + "-f", + "/dev/null" + ], + "image": "bitnami/minideb", + "name": "mycontainer", + "volumeMounts": [{ + "mountPath": "/mnt", + "name": "redisdata" + }] + }], + "restartPolicy": "Never", + "volumes": [{ + "name": "redisdata", + "persistentVolumeClaim": { + "claimName": "redis-data-new-redis-master-0" + } + }] + } +}' --image="bitnami/minideb" + +$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb +$ kubectl delete pod volpod +``` + +- Start again the cluster: + +``` +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +## NetworkPolicy + +To enable network policy for RedisTM, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to RedisTM. 
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 11.0.0 + +When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml` + +### To 9.0.0 + +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the RedisTM exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### To 7.0.0 + +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. 
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..ceb5648 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.3.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.3.3 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/README.md b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/README.md new file mode 100644 index 0000000..461fdc9 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/README.md @@ -0,0 +1,316 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. 
If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| 
`common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} 
+ topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..d95b569 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_images.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define 
"common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..622ef50 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_names.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4931d94 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..77bcc2b --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a786188 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..3e2a47c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..fb2fe60 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/default-values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/extra-flags-values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..7efeda3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: redis # bitnami/redis + ## Bitnami Redis(TM) image tag 
+ ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-sentinel # bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-exporter # bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/NOTES.txt b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..a254f58 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + 
WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.imxc.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.imxc.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}-master.imxc.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace imxc {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace imxc /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace imxc /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace imxc /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace imxc -- bash +{{- else }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . 
}} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . 
}}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . }} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/_helpers.tpl b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..193105d --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/_helpers.tpl @@ -0,0 +1,421 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap-scripts.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..02411c8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. 
The liveness check will then timeout waiting for the redis + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_replica() { + if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}" + fi + } + + {{- if .Values.sentinel.staticID }} + # remove generated known sentinels and replicas + tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf + + for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do + NAME="{{ template "redis.fullname" . 
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..923272c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/headless-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..7db7371 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/health-configmap.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..0bbbfb6 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..928f9a8 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} +# {{- if .Values.metrics.serviceMonitor.namespace }} +# namespace: {{ .Values.metrics.serviceMonitor.namespace }} +# {{- else }} + namespace: imxc +# {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - imxc +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..4dae3bc --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/networkpolicy.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..ae27ebb --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/pdb.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..e2ad471 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "redis.fullname" . 
}} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/prometheusrule.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..fba6450 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: imxc + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/psp.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..f3c9390 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..78aa2e6 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,378 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet 
+metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..56ba5f1 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..5d697de --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,494 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ 
.Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-pv.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-pv.yaml new file mode 100644 index 0000000..adb5416 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-pv.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-master +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-master-0 + namespace: 
imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-0 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-1 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-1 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-role.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..0d14129 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..83c87f5 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..9452003 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..be0894b --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,384 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.imxc.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml 
.Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..c1f3ae5 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..3b3458e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/secret.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..c1103d2 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/values.schema.json b/ansible/01_old/roles/test/files/02-base/base/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + 
} + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/redis/values.yaml 
b/ansible/01_old/roles/test/files/02-base/base/charts/redis/values.yaml new file mode 100644 index 0000000..fcd8710 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/redis/values.yaml @@ -0,0 +1,932 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: latest + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +fullnameOverride: redis + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + #enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + #registry: docker.io + registry: 10.10.31.243:5000 + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.10-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + #enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "dkagh1234!" 
+## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + # type: NodePort + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + 
externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31379 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + #type: NodePort + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31380 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false +# enabled: true + + image: + registry: 10.10.31.243:5000 # registry.cloud.intermax:5000 + repository: redis/redis-exporter + #tag: 1.15.1-debian-10-r2 + tag: latest + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+    rules: []
+
+  ## Metrics exporter pod priorityClassName
+  # priorityClassName: ''
+  service:
+    type: ClusterIP
+
+    ## External traffic policy (when service type is LoadBalancer)
+    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+    ##
+    externalTrafficPolicy: Cluster
+
+    ## Use serviceLoadBalancerIP to request a specific static IP,
+    ## otherwise leave blank
+    # loadBalancerIP:
+    annotations: {}
+    labels: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: 10.10.31.243:5000 # docker.io
+    repository: minideb # bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+  ## Init container Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.securityContext.runAsUser
+  ## When runAsUser is set to special value "auto", init container will try to chown the
+  ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/.helmignore b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). 
Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/Chart.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/0.config.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..3b23a9e --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=1 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.2=zookeeper-1.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + 
log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..422433a --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: imxc +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..9fdcf95 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: imxc +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml new file mode 100644 index 0000000..2a909f7 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-1 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-2 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-3 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..a9e5cb8 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: imxc +spec: + selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 200m + memory: 500Mi + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election +# readinessProbe: +# exec: +# command: +# - /bin/sh +# - -c +# - '[ "imok" = "$(echo ruok | nc -w 1 -q 1 127.0.0.1 2181)" ]' + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: 
zookeeper-storage + resources: + requests: + storage: 30Gi diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml new file mode 100644 index 0000000..e08ed54 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml @@ -0,0 +1,50 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-2 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/values.yaml b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/values.yaml new file mode 100644 index 0000000..7b06985 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for zookeeper. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/ansible/01_old/roles/test/files/02-base/base/index.yaml b/ansible/01_old/roles/test/files/02-base/base/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/ansible/01_old/roles/test/files/02-base/base/templates/role.yaml b/ansible/01_old/roles/test/files/02-base/base/templates/role.yaml new file mode 100644 index 0000000..28f0e32 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: imxc-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: default + namespace: imxc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/ansible/01_old/roles/test/files/02-base/base/values.yaml b/ansible/01_old/roles/test/files/02-base/base/values.yaml new file mode 100644 index 0000000..e2ad288 --- /dev/null +++ b/ansible/01_old/roles/test/files/02-base/base/values.yaml @@ -0,0 +1,73 @@ +global: + # cluster variables + CLUSTER_ID: cloudmoa + + # default storageClass + DEFAULT_STORAGE_CLASS: exem-local-storage + + # nodeAffinity + affinity_key: cmoa + affinity_value1: worker1 + affinity_value2: worker2 + affinity_value3: worker2 + + # postgres variables + IMXC_POSTGRES_PV_PATH: /media/data/postgres/postgres-data-0 + + #elastic variables + ELASTICSEARCH_PATH1: /media/data/elasticsearch/elasticsearch-data-0 + ELASTICSEARCH_PATH2: /media/data/elasticsearch/elasticsearch-data-1 + + CMOA_ES_ID: elastic + 
CMOA_ES_PW: elastic + + # zookeeper variables + IMXC_ZOOKEEPER_PATH1: /media/data/zookeeper/zookeeper-data-0 + IMXC_ZOOKEEPER_PATH2: /media/data/zookeeper/zookeeper-data-1 + IMXC_ZOOKEEPER_PATH3: /media/data/zookeeper/zookeeper-data-2 + + # kafka variables + IMXC_KAFKA_PV_PATH1: /media/data/kafka/kafka-data-0 + IMXC_KAFKA_PV_PATH2: /media/data/kafka/kafka-data-1 + IMXC_KAFKA_PV_PATH3: /media/data/kafka/kafka-data-2 + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # cortex variables + IMXC_INGESTER_PV_PATH1: /media/cloudmoa/ingester/ingester-data-1 + IMXC_INGESTER_PV_PATH2: /media/cloudmoa/ingester/ingester-data-2 + IMXC_INGESTER_PV_PATH3: /media/cloudmoa/ingester/ingester-data-3 + + # redis variables + IMXC_REDIS_PV_PATH1: /media/data/redis/redis-data-0 + IMXC_REDIS_PV_PATH2: /media/data/redis/redis-data-1 + IMXC_REDIS_PV_PATH3: /media/data/redis/redis-data-2 + + # rabbitmq variables + RABBITMQ_PATH: /media/data/rabbitmq + + # custom or etc variables + # IMXC_WORKER_NODE_NAME: $IMXC_WORKER_NODE_NAME # deprecated 2021.10.21 + # IMXC_MASTER_IP: 10.10.30.202 + IMXC_API_SERVER_DNS: imxc-api-service + + METRIC_ANALYZER_MASTER_VERSION: rel0.0.0 + METRIC_ANALYZER_WORKER_VERSION: rel0.0.0 + ELASTICSEARCH_VERSION: v1.0.0 + KAFKA_MANAGER_VERSION: v1.0.0 + KAFKA_INITUTILS_VERSION: v1.0.0 + #KAFKA_VERSION: v1.0.0 + KAFKA_VERSION: v1.0.1 + METRICS_SERVER_VERSION: v1.0.0 + POSTGRES_VERSION: v1.0.0 + CASSANDRA_VERSION: v1.0.0 + RABBITMQ_VERSION: v1.0.0 + CORTEX_VERSION: v1.11.0 #v1.9.0 + #CONSUL_VERSION: 0.7.1 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + rabbitmq: + image: + registry: 10.10.31.243:5000/cmoa3 # {{ .Values.global.IMXC_REGISTRY }} + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/es-ddl-put.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/es-ddl-put.sh new file mode 100755 
index 0000000..4079243 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/es-ddl-put.sh @@ -0,0 +1,3085 @@ +#!/bin/bash + +kubectl -n imxc wait --for=condition=ready pod/elasticsearch-1 --timeout=600s + +namespace=$1 +export ES_NODEPORT=`kubectl -n ${namespace} get svc elasticsearch -o jsonpath='{.spec.ports[*].nodePort}'` + +export MASTER_IP=`kubectl get node -o wide | grep control-plane | awk '{print $6}'` + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SECURE=true + +if [ $SECURE = true ] +then +PARAM="-u elastic:elastic --insecure" +PROTO="https" +else +PARAM="" +PROTO="http" +fi + +echo Secure=$SECURE +echo Param=$PARAM +echo Proto=$PROTO + +curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices + +echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices" + +# kubernetes_cluster_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_info" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "date": { + "type": "long" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + } +}' + +# kubernetes_cluster_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_history-*" + 
], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cluster_history" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_cluster_history": {} + } +}' + +# kubernetes_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_info" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "id": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_info": {} + } +}' + + + +# kubernetes_event_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_event_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', 
+ "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_event_info" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_event_info": {} + } +}' + + + + +# kubernetes_job_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_job_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_job_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { 
+ "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "commandlist": { + "type": "text", + "index": false + }, + "labellist": { + "type": "text", + "index": false + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_job_info": {} + } +}' + + + +# kubernetes_cronjob_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cronjob_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cronjob_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "jobname": { + "type": "keyword" + }, + "kind": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "lastruntime": { + "type": "long" + }, + "arguments": { + "type": "text", + "index": false + }, + "schedule": { + "type": "keyword" + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_cronjob_info": {} + } +}' + + + + +# kubernetes_network_connectivity +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + 
"actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_network_connectivity-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_network_connectivity" + } + } + }, + "mappings": { + "properties": { + "timestamp": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "container": { + "type": "keyword" + }, + "pid": { + "type": "integer" + }, + "peerNode": { + "type": "keyword" + }, + "peerNamespace": { + "type": "keyword" + }, + "peerService": { + "type": "keyword" + }, + "peerPod": { + "type": "keyword" + }, + "peerContainer": { + "type": "keyword" + }, + "peerPid": { + "type": "integer" + } + } + }, + "aliases": { + "kubernetes_network_connectivity": {} + } +}' + + + +# sparse_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sparse_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "sparse_log" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "date": { + "type": "keyword" + }, + "targetType": { 
+ "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "logpath": { + "type": "text", + "index": false + }, + "contents": { + "type": "text" + }, + "lineNumber": { + "type": "integer" + }, + "probability": { + "type": "float" + }, + "subentityId": { + "type": "keyword" + } + } + }, + "aliases": { + "sparse_log": {} + } +}' + + + +# sparse_model +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_model' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_model" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "modifiedDate": { + "type": "long" + }, + "logPath": { + "type": "keyword" + }, + "savedModel": { + "type": "text", + "index": false + } + } + } +}' + + + +# kubernetes_pod_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ +"order": 0, + "index_patterns": [ + "kubernetes_pod_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_info" + } + } + }, + "mappings": { + "properties": { + "eventType": {"type": "keyword"}, + "cluster": {"type": "keyword"}, + "namespace": {"type": "keyword"}, + "node": {"type": "keyword"}, + "pod": {"type": "keyword"}, + 
"podUID": {"type": "keyword"}, + "podCreationTimestamp": {"type": "long"}, + "podDeletionTimestamp": {"type": "long"}, + "podDeletionGracePeriod": {"type": "long"}, + "resourceVersion": {"type": "keyword"}, + "ownerKind": {"type": "keyword"}, + "ownerName": {"type": "keyword"}, + "ownerUID": {"type": "keyword"}, + "podPhase": {"type": "keyword"}, + "podIP": {"type": "keyword"}, + "podStartTime": {"type": "long"}, + "podReady": {"type": "boolean"}, + "podContainersReady": {"type": "boolean"}, + "isInitContainer": {"type": "boolean"}, + "containerName": {"type": "keyword"}, + "containerID": {"type": "keyword"}, + "containerImage": {"type": "keyword"}, + "containerImageShort": {"type": "keyword"}, + "containerReady": {"type": "boolean"}, + "containerRestartCount": {"type": "integer"}, + "containerState": {"type": "keyword"}, + "containerStartTime": {"type": "long"}, + "containerMessage": {"type": "keyword"}, + "containerReason": {"type": "keyword"}, + "containerFinishTime": {"type": "long"}, + "containerExitCode": {"type": "integer"}, + "containerLastState": {"type": "keyword"}, + "containerLastStartTime": {"type": "long"}, + "containerLastMessage": {"type": "keyword"}, + "containerLastReason": {"type": "keyword"}, + "containerLastFinishTime": {"type": "long"}, + "containerLastExitCode": {"type": "integer"} + } + }, + "aliases": { + "kubernetes_pod_info": {} + } +}' + + + +# kubernetes_pod_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": 
'""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_history" + } + } + }, + "mappings": { + "properties": { + "deployName": { + "type": "keyword" + }, + "deployType": { + "type": "keyword" + }, + "deployDate": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "podPhase": { + "type": "keyword" + }, + "startTime": { + "type": "keyword" + }, + "endTime": { + "type": "keyword" + }, + "exitCode": { + "type": "integer" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "time": { + "type": "long" + }, + "containerId": { + "type": "keyword" + }, + "containerName": { + "type": "keyword" + }, + "containerPhase": { + "type": "keyword" + }, + "eventAction": { + "type": "keyword" + }, + "containerStartTime": { + "type": "keyword" + }, + "containerEndTime": { + "type": "keyword" + }, + "containerImage": { + "type": "keyword" + }, + "containerImageShort": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_pod_history": {} + } +}' + + + + +# metric_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/metric_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/metric_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "metric_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "metric_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "anomaly": { + "type": "boolean" + }, + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": 
{ + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "instance": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "metricId": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "score": { + "type": "integer" + }, + "subKey": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "yhatLowerUpper": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "aliases": { + "metric_score": {} + } +}' + + + + +# entity_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/entity_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/entity_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "entity_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "entity_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "nodeId": { + "type": "keyword" + }, + "maxId": { + "type": "keyword" + }, + "maxScore": { + "type": "integer" + }, + "entityScore": { + "type": "integer" + } + } + }, + "aliases": { + "entity_score": {} + } +}' + + +# timeline_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/timeline_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + 
"delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/timeline_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "timeline_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "timeline_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "criticalCount": { + "type": "integer" + }, + "warningCount": { + "type": "integer" + }, + "attentionCount": { + "type": "integer" + }, + "normalCount": { + "type": "integer" + }, + "unixtime": { + "type": "long" + } + } + }, + "aliases": { + "timeline_score": {} + } +}' + + + +# spaninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/spaninfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/spaninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "spaninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "spaninfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "spanId": { + "type": "keyword" + }, + "parentSpanId": { + 
"type": "keyword" + }, + "protocolType": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "operation": { + "type": "keyword" + }, + "spanKind": { + "type": "keyword" + }, + "component": { + "type": "keyword" + }, + "error": { + "type": "boolean" + }, + "peerAddress": { + "type": "keyword" + }, + "peerHostname": { + "type": "keyword" + }, + "peerIpv4": { + "type": "keyword" + }, + "peerIpv6": { + "type": "keyword" + }, + "peerPort": { + "type": "integer" + }, + "peerService": { + "type": "keyword" + }, + "samplingPriority": { + "type": "keyword" + }, + "httpStatusCode": { + "type": "integer" + }, + "httpUrl": { + "type": "keyword" + }, + "httpMethod": { + "type": "keyword" + }, + "httpApi": { + "type": "keyword" + }, + "dbInstance": { + "type": "keyword" + }, + "dbStatement": { + "type": "keyword" + }, + "dbType": { + "type": "keyword" + }, + "dbUser": { + "type": "keyword" + }, + "messagebusDestination": { + "type": "keyword" + }, + "logs": { + "dynamic": false, + "type": "nested", + "properties": { + "fields": { + "dynamic": false, + "type": "nested", + "properties": { + "value": { + "ignore_above": 256, + "type": "keyword" + }, + "key": { + "type": "keyword" + } + } + }, + "timestamp": { + "type": "long" + } + } + } + } + }, + "aliases": { + "spaninfo": {} + } +}' + + + +# sta_podinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_podinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": 
"sta_podinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "version": { + "type": "keyword" + }, + "components": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "aliases": { + "sta_podinfo": {} + } +}' + + +# sta_httpapi +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpapi-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpapi" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "api": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_httpapi": {} + } +}' + + + +# sta_httpsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": 
"sta_httpsummary" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "pod": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "api": { + "type": "keyword" + }, + "countTotal": { + "type": "integer" + }, + "errorCountTotal": { + "type": "integer" + }, + "timeTotalMicrosec": { + "type": "integer" + }, + "methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_httpsummary": {} + } +}' + + + +# sta_relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_relation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_relation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "parent": { + "type": "keyword" + }, + "children": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_relation": {} + } +}' + + + +# sta_externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } 
+}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_externalrelation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "externalNamespace": { + "type": "keyword" + }, + "externalService": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_externalrelation": {} + } +}' + + + +# sta_traceinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_traceinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_traceinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "operationName": { + "type": "keyword" + }, + "spanSize": { + "type": "integer" + }, + "relatedServices": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "error": { + "type": "boolean" + } + } + }, + "aliases": { + "sta_traceinfo": {} + } +}' + + + +# sta_tracetrend +curl 
$PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_tracetrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_tracetrend" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": {"type": "integer"} + } + }, + { + "errors": { + "match": "error*", + "mapping": {"type": "integer"} + } + } + ] + }, + "aliases": { + "sta_tracetrend": {} + } +}' + +# script_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/script_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + + + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/script_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "script_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "script_history" + } + } + }, + "mappings": { + "properties": { + "taskId": { + "type": "long" + }, + "scriptName": { + "type": "keyword" + }, + "agentName": { + "type": "keyword" + }, + "targetFile": { + "type": "keyword" + }, + "args": { + 
"type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "validCmd": { + "type": "keyword" + }, + "validVal": { + "type": "keyword" + }, + "valid": { + "type": "boolean" + }, + "validResult": { + "type": "keyword" + }, + "cronExp": { + "type": "keyword" + }, + "createUser": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "error": { + "type": "boolean" + }, + "result": { + "type": "keyword" + }, + "order": { + "type": "keyword" + }, + "mtime": { + "type": "keyword" + } + } + }, + "aliases": { + "script_history": {} + } +}' + + +# kubernetes_audit_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_audit_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_audit_log" + }, + "sort.field": "stageTimestamp", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "verb": { + "type": "keyword" + }, + "userName": { + "type": "keyword" + }, + "sourceIps": { + "type": "keyword" + }, + "resource": { + "type": "keyword" + }, + "code": { + "type": "keyword" + }, + "requestReceivedTimestamp": { + "type": "long" + }, + "stageTimestamp": { + "type": "long" + }, + "durationTimestamp": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_audit_log": {} + } +}' + +# license_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/license_history' -H 'Content-Type: application/json' -d '{ 
+ "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/license_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "license_history-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "license_history" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "license_history": {} + } +}' + +# alert_event_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/alert_event_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/alert_event_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "alert_event_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": 
"alert_event_history" + } + } + }, + "mappings": { + "properties": { + "alertName": { + "type": "keyword" + }, + "clusterId": { + "type": "keyword" + }, + "data": { + "type": "text", + "index": false + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "level": { + "type": "keyword" + }, + "metaId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "startsAt": { + "type": "long" + }, + "threshold": { + "type": "double" + }, + "value": { + "type": "double" + }, + "message": { + "type": "keyword" + }, + "endsAt": { + "type": "long" + }, + "status": { + "type": "keyword" + }, + "hookCollectAt": { + "type": "long" + } + } + }, + "aliases": { + "alert_event_history": {} + } +}' + +# JSPD ilm +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/jspd_ilm' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +# jspd_lite-activetxn +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-activetxn' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-activetxn-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "cpu_time": { + "type": "integer" + }, + "memory_usage": { + "type": 
"integer" + }, + "web_id": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_exec_count": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "active_sql_elapse_time": { + "type": "integer" + }, + "db_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "thread_id": { + "type": "long" + }, + "state": { + "type": "short" + }, + "method_id": { + "type": "integer" + }, + "method_seq": { + "type": "integer" + }, + "stack_crc": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-activetxn": {} + } +}' + +# jspd_lite-alert +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-alert' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-alert-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "status": { + "type": "short" + }, + "value": { + "type": "integer" + }, + "pid": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-alert": {} + } +}' + +# jspd_lite-e2einfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-e2einfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-e2einfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "root_tid": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "e2e_info_type": { + "type": "short" + }, + "e2e_key": { + "type": "keyword" + }, + "elapse_time": { + "type": "integer" + }, + "dest_url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-e2einfo": {} + } +}' + +# jspd_lite-methodname +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-methodname' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-methodname-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "method_id": { + "type": "integer" + }, + "class_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "method_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-methodname": {} + } +}' + +# jspd_lite-sqldbinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-sqldbinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + 
"index_patterns": [ + "jspd_lite-sqldbinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-sqldbinfo": {} + } +}' + +# jspd_lite-txninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "end_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "keyword" + }, + "client_ip": { + "type": "keyword" + }, + "exception": { + "type": "short" + }, + "thread_cpu_time": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "open_conn": { + "type": "integer" + }, + "close_conn": { + "type": "integer" + }, + "open_stmt": { + "type": "integer" + }, + "close_stmt": { + "type": "integer" + }, + "open_rs": { + "type": "integer" 
+ }, + "close_rs": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_execute_count": { + "type": "integer" + }, + "sql_elapse_time": { + "type": "integer" + }, + "sql_elapse_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + }, + "txn_flag": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + }, + "http_status": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "aliases": { + "jspd_lite-txninfo": {} + } +}' + +# jspd_lite-txnmethod +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnmethod' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnmethod-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "method_seq": { + "type": "integer" + }, + "method_id": { + "type": "integer" + }, + "calling_method_id": { + "type": "integer" + }, + "stack_crc32": { + "type": "integer" + }, + "calling_stack_crc32": { + "type": "integer" + }, + "elapse_time": { + "type": "integer" + }, + "exec_count": { + "type": "integer" + }, + "error_count": { + "type": "integer" + }, + "cpu_time": { + "type": "integer" + }, + "memory": { + "type": "integer" + }, + "start_time": { + "type": "long" + }, + "method_depth": { + "type": "integer" + }, + "exception": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 32768, + "type": 
"keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-txnmethod": {} + } +}' + +# jspd_lite-txnsql +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnsql' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnsql-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "cursor_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "method_id": { + "type": "integer" + }, + "execute_count": { + "type": "integer" + }, + "elapsed_time": { + "type": "integer" + }, + "elapsed_time_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "fetch_time_max": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-txnsql": {} + } +}' + +# jspd_lite-wasstat +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-wasstat' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-wasstat-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": 
"long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "active_txns": { + "type": "integer" + }, + "sql_exec_count": { + "type": "long" + }, + "sql_prepare_count": { + "type": "long" + }, + "sql_fetch_count": { + "type": "long" + }, + "txn_end_count": { + "type": "long" + }, + "open_file_count": { + "type": "integer" + }, + "close_file_count": { + "type": "integer" + }, + "open_socket_count": { + "type": "integer" + }, + "close_socket_count": { + "type": "integer" + }, + "txn_elapse": { + "type": "long" + }, + "sql_elapse": { + "type": "long" + }, + "txn_elapse_max": { + "type": "long" + }, + "sql_elapse_max": { + "type": "long" + }, + "txn_error_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-wasstat": {} + } +}' + +# jspd_tta-externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "external_namespace": { + "type": "keyword" + }, + "external_service": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-externalrelation": {} + } +}' + +# jspd_tta-relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": 
'""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "from_service": { + "type": "keyword" + }, + "to_service": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_tta-relation": {} + } +}' + +# jspd_tta-txnlist +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnlist' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnlist-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-txnlist": {} + } +}' + +# jspd_tta-txnsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + }, + "req_count": { + "type": "integer" + }, + "resp_count": { + "type": "integer" + }, + "total_duration": { + "type": "long" + 
}, + "failed": { + "type": "integer" + }, + "http_methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "http_statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "jspd_tta-txnsummary": {} + } +}' + +# jspd_tta-txntrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txntrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txntrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": { + "type": "integer" + } + } + }, + { + "errors": { + "match": "error*", + "mapping": { + "type": "integer" + } + } + } + ] + }, + "aliases": { + "jspd_tta-txntrend": {} + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "5d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "maximum_metrics" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "maximum_metrics" + }, + 
"sort.field": "date", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "kind": { + "type": "keyword" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entity": { + "type": "keyword" + }, + "maximum": { + "type": "float" + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } +}' diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": 
"my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + 
"type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": 
"'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl 
-X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": 
{ + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + 
"source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh 
b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET 
http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": 
"'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + 
"properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new 
file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} 
+ } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": 
"integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d 
'{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 
index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 
4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/jaeger_menumeta.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. +-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/jspd_menumeta.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. + +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' 
+DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + 
} + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep 
elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep 
elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + 
"delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep 
master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} 
+ + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 
인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql new file mode 100644 index 0000000..7ed34ad --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql @@ -0,0 +1,803 @@ +UPDATE public.metric_meta2 SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, 
xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)' WHERE id = 'container_memory_usage_by_workload'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP' WHERE id = 7; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + 
prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' WHERE id = 4; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + 
- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io 
+--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: 
/home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: 
[__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 
''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: 
datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 6; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: 
[__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: 
labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: 
$COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 3; \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql new file mode 100644 index 0000000..6b63e62 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql @@ -0,0 +1,919 @@ + +-- from diff + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config +( + id bigint not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + + + +alter table tenant_info + add delete_scheduler_date timestamp; + +alter table 
tenant_info + add tenant_init_clusters varchar(255); + +alter table cloud_user + add dormancy_date timestamp; + +alter table cloud_user + add status varchar(255) default 'use'::character varying not null; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check|Check Script'; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check'; + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +UPDATE public.menu_meta +SET position = 10::integer +WHERE id = 80::bigint; + +UPDATE public.menu_meta +SET position = 99::integer +WHERE id = 90::bigint; + + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: 
${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n 
xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"', true); + + + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by 
TRX_NAME_TYPE (parameter(1), param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are 
greater than or equal to the option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); 
+INSERT INTO jspd_prop values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 
'Txnspl transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); + + +---public.metric_meta2 +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}} + node_memory_SReclaimable_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024'::text WHERE id LIKE 'node#_memory#_used' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100'::text WHERE id LIKE 'host#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = 'sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or 
+label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))'::text WHERE id LIKE 'host#_fs#_total#_by#_mountpoint' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100'::text WHERE id LIKE 'cluster#_memory#_usage' ESCAPE '#'; + + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - (node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}} + node_memory_SReclaimable_bytes{xm_entity_type=''Node'', {filter}})) >= 0 or (node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}})) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} * 100'::text WHERE id LIKE 'node#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})'::text WHERE id LIKE 'host#_memory#_used' ESCAPE '#'; + + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_error_rate', 'Service Pod Transaction Error Rate', 'The number of transaction error rate for 
pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.', '2022-02-15 18:08:58.18', '2022-02-15 18:08:58.18'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_rate', 'Service Transaction Error Rate', 'Service Transaction Error Rate', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.', '2022-02-15 14:33:00.118', '2022-02-15 15:40:17.64'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_elapsed_time_avg', 'Service Transaction Elapsed Time (avg)', 'Service Average Elapsed Time', 'sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum 
by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2021-11-15 16:09:34.233', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_elapsed_time_avg', 'Service Pod Transaction Elapsed Time (avg)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Pod Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2022-02-15 18:04:55.228', '2022-02-15 18:04:55.228'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_count', 'Service Transaction Error Count', 'Service Transaction Error Count', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) ', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Error Request count:{{humanize $value}}%|{threshold}%.', '2021-11-15 16:10:31.352', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES 
+('imxc_jspd_txn_per_sec', 'Service Transaction Count (per Second)', 'Service Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Svc Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2021-11-15 16:11:19.606', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_per_sec', 'Service Pod Transaction Count (per sec)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-02-15 17:59:39.45', '2022-02-15 17:59:39.45'); + + + +-- Auto-generated SQL script #202202221030 +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_system_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_system_core_by_workload'; +UPDATE 
public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_user_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_user_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on 
(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_limit_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_reads_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on 
(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)' + WHERE id='container_fs_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_writes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_cache_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_max_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" 
,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_swap_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100)' + WHERE id='container_memory_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, 
xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_working_set_bytes_by_workload'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_active_txn_per_sec', 'Service Active Transaction Count (per Second)', 'Service Active Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:51:45.946', '2022-03-11 15:51:45.946') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))' +WHERE id = 'imxc_jspd_active_txn_per_sec'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_pod_active_txn_per_sec', 'Service Pod Active Transaction Count (per sec)', 'The number of active transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:53:29.252', '2022-03-11 15:53:29.252') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))' +WHERE id = 'imxc_jspd_pod_active_txn_per_sec'; + + +--public.agent_install_file_info + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + 
name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - 
name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: 
https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + 
requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + 
selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql 
b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql new file mode 100644 index 0000000..e84e9be --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql @@ -0,0 +1,459 @@ + UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list 
+ - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent + spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - 
name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 
''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent + spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config + ' WHERE id = 6; \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql new file mode 100644 index 0000000..0d20f2c --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql @@ -0,0 +1,1379 @@ +CREATE TABLE 
public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +-- 더존(3.3.2) 에서 누락되었던 항목 모두 추가 +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, 
code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting 
(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search anomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', 
'2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'trace-agent', 'trace agent deployment name', null, 
'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'imxc-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values 
('Api Server', 'imxc-api-demo', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui-demo', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +UPDATE 
public.agent_install_file_info +SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + 
valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text +WHERE id = 2::bigint; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='topology_idx'; + +UPDATE public.common_setting +SET code_value='spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', + code_group='storageidx' +WHERE code_id='trace_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='event_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='sparse_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='anomaly_idx'; + +UPDATE public.common_setting +SET code_value='alert_event_history', + code_group='storageidx' +WHERE code_id='alert_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='audit_idx'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + 
tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: 
cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + 
volumeMounts: + - name: tmp-dir + mountPath: /tmp'::text WHERE id = 5::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + 
- target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: 
STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - 
pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: 
cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + 
hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + 
action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node 
+ metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: 
CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +ALTER TABLE public.alert_rule_config_info ALTER COLUMN config_data TYPE text; + +update alert_rule_config_info +set config_data = '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"' +where config_id = 'rules'; + +ALTER TABLE public.alert_config_info ALTER COLUMN config_data TYPE text, ALTER COLUMN config_default TYPE text; + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql new file mode 100644 index 
0000000..5c5d3c9 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql @@ -0,0 +1,8 @@ +-- admin의 owner 속성 추가 +UPDATE cloud_user SET is_tenant_owner = true WHERE user_id = 'admin'; + +-- owner에 대한 종속성을 admin으로 이관기능(필요하면 사용) +UPDATE auth_resource3 SET name = replace(name, 'owner', 'admin') WHERE name like '%|owner|%'; + +-- CLOUD-2305 node_memory_used metric_meta node_memory_SReclaimable_bytes 제거 패치문 반영 +UPDATE metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024' WHERE id = 'node_memory_used'; diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql new file mode 100644 index 0000000..02f01db --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql @@ -0,0 +1,361 @@ +-- agent_install_file_info +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when 
total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: 
[__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - 
source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +-- CLOUD-2798 pod_phase_count_by_cluster metric_meta 수정 +UPDATE metric_meta2 SET expr = 'count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))' WHERE id = 'pod_phase_count_by_cluster'; + +-- node_memory_usage 
수정 +update metric_meta2 set expr = 'sum by (xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' where id = 'node_memory_usage'; \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql new file mode 100644 index 0000000..7c582c5 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql @@ -0,0 +1,360 @@ +-- CLOUD-3473 Memory capacity 조회 쿼리 수정 +update metric_meta2 set description = 'imxc_kubernetes_node_resource_capacity_memory', +expr = 'sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})' where id = 'cluster_memory_capacity'; + +-- module명 metricdata owner_name 와 일치하도록 변경 +update common_setting set code_value ='cmoa-collector' where code_id = 'Cloudmoa Collector'; +update common_setting set code_value ='imxc-api' where code_id = 'Api Server'; +update common_setting set code_value ='imxc-ui' where code_id = 'Ui Server'; +update common_setting set code_value ='cloudmoa-trace-agent' where code_id = 'Trace Agent'; + +-- CLOUD-4795 Contaeird 환경 Container Network 수집 불가 건 확인 +-- 22.10.08 현대카드 대응 건으로 release 3.4.6에 반영 +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is 
greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type 
+ replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - 
source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: 
job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - 
source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: 
xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config'::text WHERE id = 3::bigint; + diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql new file mode 100644 
index 0000000..92344db --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql @@ -0,0 +1,102 @@ +-- CLOUD-4752 node_memory_usage alert 관련 쿼리 수정 +update metric_meta2 set +expr = 'sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' +where id = 'node_memory_usage'; + +-- CLOUD-6474 node-exporter | GPMAXPROCS 세팅 +-- Auto-generated SQL script #202211241543 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - 
--web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' + WHERE id=4; \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql new file mode 100644 index 0000000..ea66c68 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql @@ -0,0 +1,387 @@ +-- CLOUD-6526 host 관련 쿼리 수정 +-- 수집된 메트릭 시간차로 인해 데이터 표출이 안되는걸 방지하기 위해 rate 5m 추가 +UPDATE metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )' +WHERE id='host_network_io_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )' +WHERE id = 'host_disk_read_write_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (instance) ( 
+(rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or +(rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))' +WHERE id = 'host_disk_iops'; + +-- CLOUD-8671 Metric-Agent | 데이터 필터링 설정 추가 +-- Workload > Pod 화면 등에 Docker 런타임 환경의 자원 사용량이 2배 가량으로 보이던 문제 픽스 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + 
kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + 
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: 
$DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=3; + +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=6; diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql new file mode 
100644 index 0000000..99d1dbe --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql @@ -0,0 +1,2844 @@ +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS warning_sign character VARYING(255); +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS critical_sign character VARYING(255); + +CREATE TABLE IF NOT EXISTS public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +) + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + WHERE public.metric_meta2.id = 'node_contextswitch_and_filedescriptor'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_core_by_workload'; + + +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System 
(%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} 
CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_reads_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_limit_bytes_by_workload'; + + +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By 
workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_writes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, 
xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.o + wner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.o + wner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_max_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_bytes_by_workload'; + + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 
'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_swap_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_working_set_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_cache_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_receive_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, 
owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_transmit_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_pod_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_container_not_running_by_workload'; 
+ + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + WHERE public.metric_meta2.id = 'cotainer_restart_count_by_workload'; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - 
effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: 
cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) + WHERE public.agent_install_file_info.id = 4; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + 
global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') + WHERE public.agent_install_file_info.id = 3; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + 
verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) + WHERE public.agent_install_file_info.id = 2; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... 
should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') + WHERE public.agent_install_file_info.id = 6; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. 
+ ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + 
apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + 
app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. + ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + 
containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) + WHERE public.agent_install_file_info.id = 7; + +--Menu Resource +--Infrastructure +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (SELECT id 
FROM auth_resource3 WHERE name='menu|Infrastructure|Topology'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Resource Usage'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Resource Usage'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Namespace'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Namespace'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Nodes'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES 
(6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Node Details'); + +--Workloads +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Deploy List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Cron Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Pods'); + +--Services +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Structure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Detail'); + +--Statistics & Analysis +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Performance Trends'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Anomaly Score'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Job History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse 
Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Log Viewer'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Event Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Container Life Cycle'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Service Traces'); + +--Reports +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Documents'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (62, 'Templates', NULL, 1, 'reportSettings', (select id from auth_resource3 where name='menu|Reports|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Templates'); + +--Dashboards +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Documents'); + +INSERT INTO public.menu_meta (id, 
description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Templates'); + +--Hosts +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (80, 'Hosts', '12.Hosts', 1, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
+VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Detail'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Group'); + +--Settings +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (90, 'Settings', '08.Setting', 10, NULL, (select id from auth_resource3 where name='menu|Settings'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|User & Group'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', 
(select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Host Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Metric Meta'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|General'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Notification'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (99, 
'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alias'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|License'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent Installation'); + +--Health Check +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) +VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check|Check Script'); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql new file mode 100644 index 0000000..60ad862 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql @@ -0,0 +1,4 @@ +alter table cloud_user alter column log_in_count set default 0; +alter table cloud_user alter column user_lock set default false; + +UPDATE public.metric_meta2 SET meta_name = 'Number of Containers Restart', description = 'Number of Containers Restart (10m)', expr = 'increase(imxc_kubernetes_container_restart_count{{filter}}[10m])', resource_type = 'State', entity_type = 'Workload', groupby_keys = null, in_use = true, anomaly_score = false, message = 'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.', created_date = '2021-06-23 09:30:38.646312', modified_date = '2021-06-23 09:30:38.646312' WHERE id = 'cotainer_restart_count_by_workload'; \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_ddl.psql 
b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_ddl.psql new file mode 100644 index 0000000..c8deff4 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_ddl.psql @@ -0,0 +1,1667 @@ +CREATE TABLE public.tenant_info ( + id character varying(255) NOT NULL, + name character varying(255) NOT NULL, + in_used boolean DEFAULT true, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + delete_scheduler_date timestamp without time zone NULL, + contract_id bigint NOT NULL, + tenant_init_clusters character varying(255) NULL +); +ALTER TABLE ONLY public.tenant_info ADD CONSTRAINT tenant_info_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + cluster_id character varying(255) NOT NULL, + description character varying(255), + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + namespace character varying(255) DEFAULT 'default'::character varying +); + +ALTER TABLE public.alert_group OWNER TO admin; + +ALTER TABLE ONLY public.alert_group + ADD CONSTRAINT alert_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX alert_group_name_uindex ON public.alert_group USING btree (name); + +CREATE TABLE public.alert_target ( + id bigint NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + cluster_id character varying(255) NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + alert_group_id bigint, + namespace character varying(255) +); + +ALTER TABLE public.alert_target OWNER TO admin; + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT alert_target_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT fkjrvj775641ky7s0f82kx3sile FOREIGN KEY (alert_group_id) REFERENCES 
public.alert_group(id); + + + +CREATE TABLE public.report_template ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + enable boolean NOT NULL, + metric_data text, + template_data text, + title character varying(255) +); + +ALTER TABLE public.report_template OWNER TO admin; + +ALTER TABLE ONLY public.report_template + ADD CONSTRAINT report_template_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_event ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + alert_name character varying(255) NOT NULL, + cluster_id character varying(255) NOT NULL, + data text NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + level character varying(255) NOT NULL, + meta_id character varying(255) NOT NULL, + namespace character varying(255), + starts_at bigint NOT NULL, + threshold character varying(255) NOT NULL, + value character varying(255) NOT NULL, + message character varying(255), + ends_at bigint, + status character varying(20) NOT NULL, + hook_collect_at bigint +); + +ALTER TABLE public.alert_event OWNER TO admin; + +CREATE TABLE public.metric_meta2 ( + id character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + description character varying(255) NOT NULL, + expr text NOT NULL, + resource_type character varying(255), + entity_type character varying(255) NOT NULL, + groupby_keys character varying(255), + in_use boolean DEFAULT false NOT NULL, + anomaly_score boolean DEFAULT false NOT NULL, + message character varying(255) NOT NULL, + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + +ALTER TABLE public.metric_meta2 OWNER to admin; + +ALTER TABLE 
ONLY public.metric_meta2 + ADD CONSTRAINT metric_meta2_pk PRIMARY KEY (id); + +CREATE TABLE public.alert_rule ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + critical float, + name character varying(255), + warning float, + alert_group_id bigint, + alert_rule_meta_id character varying(255) NOT NULL, + alert_target_id bigint, + duration character varying(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + warning_sign character varying(255), + critical_sign character varying(255) +); + +ALTER TABLE public.alert_rule OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT alert_rule_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk6b09d1xfyago6wiiqhdiv03s3 FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk8wkucwkgr48hkfg8cvuptww0f FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fkiqaskea7ts0f872u3nx9ne25u FOREIGN KEY (alert_target_id) REFERENCES public.alert_target(id); + +CREATE TABLE public.alert_rule_meta ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + description text NOT NULL, + expr character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + target character varying(255) NOT NULL, + message character varying(255) +); + +ALTER TABLE public.alert_rule_meta OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule_meta + ADD CONSTRAINT alert_rule_meta_pkey PRIMARY KEY (id); + +CREATE SEQUENCE hibernate_sequence; + +CREATE TABLE public.cloud_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + name character varying(255) NOT NULL, + description character varying(255), + created_by character varying(255), 
+ auth_resource_id bigint +); + +ALTER TABLE public.cloud_group OWNER TO admin; + +ALTER TABLE ONLY public.cloud_group + ADD CONSTRAINT cloud_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX cloud_group_name_uindex ON public.cloud_group USING btree (name); + +CREATE TABLE public.cloud_user ( + user_id character varying(255) NOT NULL, + email character varying(255), + is_admin boolean NOT NULL, + phone character varying(255), + user_nm character varying(255) NOT NULL, + user_pw character varying(255) NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + dormancy_date timestamp without time zone NULL, + company character varying(255), + department character varying(255), + last_log_in_date timestamp without time zone, + "position" character varying(255), + use_ldap boolean NOT NULL, + auth_method character varying(255) NOT NULL, + log_in_count integer default 0 NOT NULL, + user_lock boolean default false NOT NULL, + user_lock_date timestamp without time zone, + tenant_id character varying(120), + is_tenant_owner boolean default false, + auth_resource_id bigint, + status character varying(255) default 'use' NOT NULL +); + +ALTER TABLE public.cloud_user OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user ADD CONSTRAINT cloud_user_pkey PRIMARY KEY (user_id); + +ALTER TABLE ONLY public.cloud_user + ADD CONSTRAINT cloud_user_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.menu_meta ( + id bigint NOT NULL, + description character varying(255), + icon character varying(255), + "position" integer NOT NULL, + url character varying(255), + auth_resource3_id bigint NOT NULL, + scope_level int default 0 +); + +ALTER TABLE public.menu_meta OWNER TO admin; + +ALTER TABLE ONLY public.menu_meta + ADD CONSTRAINT menu_meta_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.metric_base ( + meta_name character varying(255) NOT NULL, + provider character varying(255) NOT 
NULL, + description character varying(255) NOT NULL, + resource_type character varying(255), + diag_type character varying(255), + entity_type character varying(255) NOT NULL, + metric_type character varying(255) NOT NULL, + keys character varying(255), + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.metric_base OWNER TO admin; + +ALTER TABLE ONLY public.metric_base + ADD CONSTRAINT metric_base_pk PRIMARY KEY (meta_name); + +CREATE TABLE public.report_static ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + metric_data text, + template_data text, + title character varying(255), + type character varying(255), + report_template_id bigint +); + +ALTER TABLE public.report_static OWNER TO admin; + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT report_static_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT fk7o821ym9a57lrcfipf928cfpe FOREIGN KEY (report_template_id) REFERENCES public.report_template(id); + +CREATE TABLE public.user_group ( + user_group_id bigint NOT NULL, + user_id character varying(255) NOT NULL +); + +ALTER TABLE public.user_group OWNER TO admin; + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT user_group_pkey PRIMARY KEY (user_group_id, user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkooy6rip2craw6jy3geb5wnix6 FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkowo8h9te5nwashab3u30docg FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +CREATE TABLE public.cloud_user_profile ( + user_id character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + 
profile_image oid +); + +ALTER TABLE public.cloud_user_profile OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_profile + ADD CONSTRAINT cloud_user_profile_pkey PRIMARY KEY (user_id); + + +CREATE TABLE public.common_setting ( + code_id character varying(255) NOT NULL, + code_value character varying(255), + code_desc character varying(255), + code_auth character varying(255), + code_group character varying(255), + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.common_setting OWNER TO admin; + +ALTER TABLE ONLY public.common_setting + ADD CONSTRAINT common_setting_pkey PRIMARY KEY (code_id); + + + +CREATE TABLE public.dashboard_thumbnail ( + id bigint NOT NULL, + thumbnail_image oid, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.dashboard_thumbnail OWNER TO admin; + +ALTER TABLE ONLY public.dashboard_thumbnail + ADD CONSTRAINT dashboard_thumbnail_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.notification_channel ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone, + modified_by character varying(255), + modified_date timestamp without time zone, + cluster_id character varying(255), + config text, + name character varying(255), + type character varying(255) +); + +ALTER TABLE public.notification_channel OWNER TO admin; + +ALTER TABLE ONLY public.notification_channel + ADD CONSTRAINT notification_channel_pkey PRIMARY KEY (id); + + +CREATE TABLE public.notification_registry ( + id bigint NOT NULL, + alert_rule_id bigint NOT NULL, + notification_channel_id bigint +); + +ALTER TABLE public.notification_registry OWNER TO admin; + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT notification_registry_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT fk28xo8snm6fd19i3uap0oba0d1 FOREIGN KEY (notification_channel_id) REFERENCES 
public.notification_channel(id); + + +CREATE TABLE public.license_check_2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_id integer NOT NULL, + real_host_id integer NOT NULL, + imxc_cpu_count integer NOT NULL, + real_cpu_count integer NOT NULL, + target_clusters_count integer NOT NULL, + real_clusters_count integer NOT NULL, + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + features_bitmap integer NOT NULL, + allowable_range integer NOT NULL, + check_time timestamp without time zone NOT NULL, + check_result integer NOT NULL +); + +ALTER TABLE public.license_check_2 + ADD CONSTRAINT license_check_pkey PRIMARY KEY (id); + +CREATE INDEX license_check_check_time_idx ON license_check_2(check_time); + + +CREATE TABLE public.license_violation ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone +); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check_2(id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check_2(id); + +CREATE INDEX license_violation_check_time_idx ON license_violation(check_time); +CREATE INDEX license_violation_resolved_time_idx ON license_violation(resolved_time); + + +CREATE TABLE public.license_key ( + id bigint NOT 
NULL, + license_key text NOT NULL, + set_time timestamp NOT NULL, + in_used bool NULL, + tenant_id varchar NULL, + cluster_id bigint NULL, + CONSTRAINT license_key_pkey PRIMARY KEY (id) +); + +ALTER TABLE public.license_key ADD CONSTRAINT license_key_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.license_check2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_ids character varying(255), + real_host_ids character varying(255), + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + allowable_range integer NOT NULL, + license_cluster_id character varying(255), + check_time timestamp without time zone NOT NULL, + check_result integer NOT null +); + +ALTER TABLE public.license_check2 + ADD CONSTRAINT license_check2_pkey PRIMARY KEY (id); + +CREATE INDEX license_check2_time_idx ON license_check2(check_time); + +CREATE TABLE public.license_violation2 ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone, + cluster_id varchar not null +); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check2(id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check2(id); + +CREATE INDEX 
license_violation2_check_time_idx ON license_violation2(check_time); +CREATE INDEX license_violation2_resolved_time_idx ON license_violation2(resolved_time); + +CREATE TABLE public.license_key2 ( + id bigint not null, + license_key text not null, + set_time timestamp without time zone not null, + cluster_id varchar, + license_used bool not null +); + +ALTER TABLE public.license_key2 + ADD CONSTRAINT license_key2_pkey PRIMARY KEY (id); + +create table public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +CREATE TABLE public.auth_resource2 ( + id bigint NOT NULL default nextval('hibernate_sequence'), + access_type integer NOT NULL, + name character varying(255) NOT NULL, + parent_id bigint, + type character varying(255) NOT NULL +); + +ALTER TABLE public.auth_resource2 OWNER TO admin; + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT auth_resource2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT resource_name_uniq UNIQUE (name, type, parent_id); + +--ALTER TABLE ONLY public.auth_resource2 +-- ADD CONSTRAINT auth_resource2_auth_resource_id_fk FOREIGN KEY (parent_id) REFERENCES public.auth_resource2(id); +-- +--ALTER 
TABLE ONLY public.menu_meta +-- ADD CONSTRAINT fk2tqq4ybf6w130fsaejhrsnw5s FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.user_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_id character varying(255) +); + +ALTER TABLE public.user_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.user_permission2 +-- ADD CONSTRAINT user_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_user_id_fk FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + + +CREATE TABLE public.group_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_group_id bigint +); + +ALTER TABLE public.group_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_user_group_id_fk FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +-- ALTER TABLE ONLY public.group_permission2 +-- ADD CONSTRAINT group_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.resource_group2 ( + id int8 NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + "name" varchar(255) NOT NULL, + description varchar(255) NULL, + CONSTRAINT resource_group2_pkey PRIMARY KEY (id) +-- CONSTRAINT resource_group2_fk1 FOREIGN KEY (id) 
REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_group2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_group2 TO "admin"; + +CREATE TABLE public.resource_member2 ( + resource_group_id int8 NOT NULL, + auth_resource_id int8 NOT NULL, + CONSTRAINT resource_member2_pkey PRIMARY KEY (resource_group_id, auth_resource_id), + CONSTRAINT resource_member2_fkey1 FOREIGN KEY (resource_group_id) REFERENCES resource_group2(id) +-- CONSTRAINT resource_member2_fkey2 FOREIGN KEY (auth_resource_id) REFERENCES auth_resource2(id) +); + +ALTER TABLE public.resource_member2 OWNER TO "admin"; +GRANT ALL ON TABLE public.resource_member2 TO "admin"; + +CREATE TABLE public.dashboard2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + layout text NOT NULL, + title character varying(255) NOT NULL, + auth_resource_id bigint NOT NULL, + created_by character varying(255) NOT NULL, + modified_by character varying(255) NOT NULL, + description character varying(255), + share boolean DEFAULT false +); + +ALTER TABLE public.dashboard2 OWNER TO admin; + +ALTER TABLE ONLY public.dashboard2 + ADD CONSTRAINT dashboard2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.dashboard2 +-- ADD CONSTRAINT dashboard_resource_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.log_management ( + cluster_id varchar NOT NULL, + node_id varchar NOT NULL, + log_rotate_dir varchar, + log_rotate_count integer, + log_rotate_size integer, + log_rotate_management boolean NOT NULL, + back_up_dir varchar, + back_up_period integer, + back_up_dir_size integer, + back_up_management boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +alter table public.log_management add constraint log_management_pkey primary key (cluster_id, node_id); + +CREATE TABLE public.sampling_setting ( + service_id bigint NOT NULL, + 
service_name character varying(255), + sampling_type character varying(255), + sampling_param character varying(255), + cluster varchar, + namespace varchar, + cluster_id bigint +); +ALTER TABLE public.sampling_setting OWNER TO admin; + +ALTER TABLE ONLY public.sampling_setting + ADD CONSTRAINT sampling_setting_pkey PRIMARY KEY (service_id); + +CREATE TABLE public.operation_setting ( + id bigint NOT NULL, + service_id bigint NOT NULL, + sampling_type character varying(255), + sampling_param character varying(255), + operation_name character varying(255) +); + +ALTER TABLE public.operation_setting OWNER TO admin; + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.operation_setting + ADD CONSTRAINT operation_setting_fkey FOREIGN KEY (service_id) REFERENCES public.sampling_setting(service_id); + +CREATE TABLE public.cluster_setting ( + cluster_id bigint NOT NULL, + param_type character varying(255), + param_value character varying(255), + cluster_name varchar, + name character varying(255) +); + +ALTER TABLE ONLY public.cluster_setting + ADD CONSTRAINT cluster_setting_pkey PRIMARY KEY (cluster_id); + +CREATE TABLE public.alias_code ( + user_id varchar NOT NULL, + id varchar NOT NULL, + name varchar, + type varchar, + use_yn varchar, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.alias_code add constraint alias_code_pkey primary key (user_id, id); + +CREATE TABLE public.sparse_log_info ( + id varchar NOT NULL, + cluster_id varchar, + namespace varchar, + target_type varchar, + target_id varchar, + log_path varchar, + created_date timestamp, + modified_date timestamp, + threshold float4, + PRIMARY KEY ("id") +); + +CREATE TABLE public.view_code ( + user_id varchar NOT NULL, + view_id varchar NOT NULL, + json_data text, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time 
zone +); + +ALTER TABLE ONLY public.view_code add constraint view_code_pkey primary key (user_id, view_id); + +CREATE TABLE public.entity_black_list ( + entity_type varchar not null, + entity_name varchar not null, + cluster_id varchar not null, + namespace varchar, + black_list bool not null, + workload varchar(255) not null +); + +ALTER TABLE public.entity_black_list + ADD CONSTRAINT entity_black_list_pkey PRIMARY KEY (entity_type, entity_name, cluster_id, namespace); + +CREATE TABLE public.script_setting ( + id bigint NOT NULL, + name character varying(255), + agent_list character varying(255), + file_path character varying(255), + args character varying(255), + valid_cmd character varying(255), + valid_val character varying(255), + cron_exp character varying(255), + create_user character varying(255), + mtime BIGINT, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.script_setting + ADD CONSTRAINT script_setting_pkey PRIMARY KEY (id); + +CREATE TABLE public.agent_install_file_info ( + id bigint NOT NULL, + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + description text, + version character varying(255), + yaml text, + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.agent_install_file_info ADD CONSTRAINT agent_install_file_info_pkey PRIMARY KEY (id); + +create table auth_resource3( + id bigint NOT NULL default nextval('hibernate_sequence'), + name character varying(255) NOT NULL, + is_deleted boolean not null, + tenant_id character varying(255) +); + +ALTER TABLE public.auth_resource3 owner to admin; + +ALTER TABLE ONLY public.auth_resource3 + ADD CONSTRAINT auth_resource3_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource3 + ADD CONSTRAINT auth_resource3_name_uniq UNIQUE (name); + +create table resource_member3( + 
resource_group_id bigint not null, + auth_resource3_id bigint not null +); + +ALTER TABLE resource_member3 owner to admin; + +ALTER TABLE ONLY public.resource_member3 + ADD CONSTRAINT resource_member3_pkey primary key (resource_group_id, auth_resource3_id); + +ALTER TABLE ONLY public.auth_resource3 ADD CONSTRAINT auth_resource3_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +ALTER TABLE public.menu_meta ADD CONSTRAINT menu_meta_auth_resource3_fk FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.user_permission2 ADD CONSTRAINT user_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_group2 ADD CONSTRAINT resource_group2_auth_resource3_fk1 FOREIGN KEY (id) REFERENCES auth_resource3(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey1 FOREIGN KEY (resource_group_id) REFERENCES public.resource_group2(id); +ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey2 FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id); +ALTER TABLE public.group_permission2 ADD CONSTRAINT group_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.dashboard2 ADD CONSTRAINT dashboard2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_user ADD CONSTRAINT cloud_user_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); +ALTER TABLE public.cloud_group ADD CONSTRAINT cloud_group_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id); + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + 
input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +-- noti server table +CREATE TABLE public.alert_group_v2 ( + id bigint NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + cluster_id varchar(255) NOT NULL, + description varchar(255), + name varchar(255) NOT NULL, + type varchar(255) NOT NULL, + namespace varchar(255) default 'default'::character varying, + destination varchar(255) NOT NULL, + created_by varchar(255) NOT NULL +); + +CREATE TABLE public.alert_target_v2 ( + id bigint NOT NULL, + created_date timestamp, + modified_date timestamp, + cluster_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + alert_group_id bigint, + namespace varchar(255) +); + +CREATE TABLE public.alert_rule_v2 ( + id bigint NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + critical double precision, + name varchar(255), + warning double 
precision, + alert_group_id bigint, + alert_rule_meta_id varchar(255) NOT NULL, + alert_target_id bigint, + duration varchar(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + critical_sign varchar(255), + warning_sign varchar(255), + destination varchar(255), + created_by varchar(255) +); + +ALTER TABLE public.alert_group_v2 ADD CONSTRAINT alert_group_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_id_pk PRIMARY KEY (id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_id_pk PRIMARY KEY (id); + +ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_rule_meta_id_fk FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); +ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_target_id_fk FOREIGN KEY (alert_target_id) REFERENCES public.alert_target_v2(id); +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT fk4lljw4fnija73tm3lthjg90rx FOREIGN KEY (alert_rule_id) REFERENCES public.alert_rule_v2(id); + + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +create table alert_config +( + id varchar not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + 
routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); + +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + +CREATE TABLE public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +-------- 2022-05-31 KubeInfo flatting table -------- +CREATE TABLE cmoa_configmap_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + kind_status varchar(50), + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + binaryData text, + data text, + immutable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +----------------------- +CREATE TABLE cmoa_cronjob_active( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + 
metadata_name text, + status_active_apiVersion text, + status_active_fieldPath text, + status_active_kind text, + status_active_name text, + status_active_namespace text, + status_active_resourceVersion text, + status_active_uid text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_cronjob_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_failedJobsHistoryLimit text, + spec_schedule text, + spec_successfulJobsHistoryLimit text, + spec_suspend text, + status_lastScheduleTime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_daemonset_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + status_currentNumberScheduled text, + status_desiredNumberScheduled text, + status_numberAvailable text, + status_numberMisscheduled text, + status_numberReady text, + status_numberUnavailable text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_deployment_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationTimestamp 
varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceVersion text, + spec_replicas text, + spec_template_spec_containers_image text, + status_availableReplicas text, + status_readyReplicas text, + status_replicas text, + status_unavailableReplicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_addresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_addresses_ip text, + subset_addresses_hostname text, + subset_addresses_nodeName text, + subset_addresses_targetRef text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_base( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceVersion text, + metadata_annotations text, + metadata_creationTimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_notreadyaddresses( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + subset_notreadyaddresses_ip text, + subset_notreadyaddresses_hostname text, + subset_notreadyaddresses_nodename text, + subset_notreadyaddresses_targetref text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_endpoint_ports( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + 
metadata_name text, + subset_ports_port text, + subset_ports_appprotocol text, + subset_ports_name text, + subset_ports_protocol text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_event_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + action text, + count text, + eventtime text, + firsttimestamp text, + involvedobject_apiversion text, + involvedobject_fieldpath text, + involvedobject_kind text, + involvedobject_name text, + involvedobject_namespace text, + involvedobject_resourceversion text, + involvedobject_uid text, + lasttimestamp text, + message text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + reason text, + related_apiversion text, + related_fieldpath text, + related_kind text, + related_name text, + related_namespace text, + related_resourceversion text, + related_uid text, + series_count text, + series_lastobservedtime text, + series_state text, + source_component text, + source_host text, + type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_resourceversion text, + spec_backofflimit text, + spec_completions text, + spec_parallelism text, + 
status_active text, + status_completiontime text, + status_failed text, + status_starttime text, + status_succeeded text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_template ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_template_spec_containers_args text, + spec_template_spec_containers_command text, + spec_template_spec_containers_image text, + spec_template_spec_containers_name text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_namespace_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + spec_finalizers text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_annotations text, + spec_podcidr text, + spec_taints text, + status_capacity_cpu text, + status_capacity_ephemeral_storage text, + status_capacity_hugepages_1gi text, + status_capacity_hugepages_2mi text, + status_capacity_memory text, + status_capacity_pods text, + status_allocatable_cpu text, + status_allocatable_ephemeral_storage text, + 
status_allocatable_hugepages_1gi text, + status_allocatable_hugepages_2mi text, + status_allocatable_memory text, + status_allocatable_pods text, + status_addresses text, + status_daemonendpoints_kubeletendpoint_port text, + status_nodeinfo_machineid text, + status_nodeinfo_systemuuid text, + status_nodeinfo_bootid text, + status_nodeinfo_kernelversion text, + status_nodeinfo_osimage text, + status_nodeinfo_containerruntimeversion text, + status_nodeinfo_kubeletversion text, + status_nodeinfo_kubeproxyversion text, + status_nodeinfo_operatingsystem text, + status_nodeinfo_architecture text, + status_volumesinuse text, + status_volumesattached text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_condition ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lastheartbeattime text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_image ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_images_names text, + status_images_sizebytes text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolume_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace 
text, + metadata_resourceversion text, + spec_accessmodes text, + spec_awselasticblockstore text, + spec_azuredisk text, + spec_azurefile text, + spec_capacity text, + spec_claimref_apiversion text, + spec_claimref_fieldpath text, + spec_claimref_kind text, + spec_claimref_name text, + spec_claimref_namespace text, + spec_claimref_resourceversion text, + spec_claimref_uid text, + spec_csi text, + spec_fc text, + spec_flexvolume text, + spec_flocker text, + spec_gcepersistentdisk text, + spec_glusterfs text, + spec_hostpath text, + spec_iscsi text, + spec_local text, + spec_nfs text, + spec_persistentvolumereclaimpolicy text, + spec_photonpersistentdisk text, + spec_portworxvolume text, + spec_quobyte text, + spec_rbd text, + spec_scaleio text, + spec_storageclassname text, + spec_storageos text, + spec_volumemode text, + spec_vspherevolume text, + status_message text, + status_phase text, + status_reason text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolumeclaim_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_storageclassname text, + spec_volumemode text, + spec_volumename text, + status_accessmodes text, + status_capacity text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + kind_status varchar(50), + metadata_uid varchar(40), + row_index int, + metadata_name text, + metadata_selflink text, 
+ metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_generatename text, + metadata_namespace text, + metadata_deletiontimestamp text, + metadata_deletiongraceperiodseconds text, + metadata_labels text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + spec_hostnetwork text, + spec_priorityclassname text, + spec_enableservicelinks text, + spec_priority text, + spec_schedulername text, + spec_hostpid text, + spec_nodename text, + spec_serviceaccount text, + spec_serviceaccountname text, + spec_dnspolicy text, + spec_terminationgraceperiodseconds text, + spec_restartpolicy text, + spec_securitycontext text, + spec_nodeselector_kubernetes_io_hostname text, + spec_tolerations text, + status_phase text, + status_hostip text, + status_podip text, + status_starttime text, + status_qosclass text, + status_reason text, + status_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_conditions ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + status_conditions_lastprobetime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containerstatuses ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_containerstatuses_name text, + status_containerstatuses_ready text, + status_containerstatuses_restartcount text, + status_containerstatuses_image text, + status_containerstatuses_imageid text, 
+ status_containerstatuses_containerid text, + status_containerstatuses_state_terminated_exitcode text, + status_containerstatuses_state_terminated_reason text, + status_containerstatuses_state_terminated_startedat text, + status_containerstatuses_state_terminated_finishedat text, + status_containerstatuses_state_terminated_containerid text, + status_containerstatuses_state_waiting_reason text, + status_containerstatuses_state_waiting_message text, + status_containerstatuses_state_running_startedat text, + status_containerstatuses_laststate_terminated_exitcode text, + status_containerstatuses_laststate_terminated_reason text, + status_containerstatuses_laststate_terminated_startedat text, + status_containerstatuses_laststate_terminated_finishedat text, + status_containerstatuses_laststate_terminated_containerid text, + status_containerstatuses_laststate_waiting_reason text, + status_containerstatuses_laststate_waiting_message text, + status_containerstatuses_laststate_running_startedat text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containers ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_containers_name text, + spec_containers_image text, + spec_containers_env text, + spec_containers_resources_limits_cpu text, + spec_containers_resources_limits_memory text, + spec_containers_resources_requests_cpu text, + spec_containers_resources_requests_memory text, + spec_containers_volumemounts text, + spec_containers_securitycontext_privileged text, + spec_containers_command text, + spec_containers_ports text, + spec_containers_args text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_volume ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid 
varchar(40), + row_index int, + metadata_name text, + spec_volumes_name text, + spec_volumes_hostpath text, + spec_volumes_secret text, + spec_volumes_configmap text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_replicaset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_availablereplicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_hard text, + spec_scopes text, + status_hard text, + status_used text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_scopeselector ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_scopeselector_matchexpressions_operator text, + spec_scopeselector_matchexpressions_scopename text, + spec_scopeselector_matchexpressions_values text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE 
cmoa_service_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_deletiongraceperiodseconds text, + metadata_deletiontimestamp text, + metadata_labels text, + metadata_namespace text, + spec_clusterip text, + spec_externalips text, + spec_selector text, + spec_type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_ports ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_ports_appprotocol text, + spec_ports_name text, + spec_ports_nodeport text, + spec_ports_port text, + spec_ports_protocol text, + spec_ports_targetport text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_statefulset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); + +CREATE TABLE public.api_error_history ( + id int8 NOT NULL, + api_msg varchar(255) NULL, + code varchar(255) NULL, + "exception" varchar(255) NULL, + http_error varchar(255) NULL, + http_status 
int4 NULL, + occureence_time varchar(255) NULL, + params varchar(255) NULL, + "path" varchar(255) NULL, + "type" varchar(255) NULL, + CONSTRAINT api_error_history_pkey PRIMARY KEY (id) +); + +CREATE TABLE public.metric_score ( + clst_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + metric_id varchar(255) NOT NULL, + sub_key varchar(255) NOT NULL, + unixtime int4 NOT NULL, + anomaly bool NOT NULL, + cont_name varchar(255) NULL, + "instance" varchar(255) NULL, + "namespace" varchar(255) NULL, + node_id varchar(255) NULL, + pod_id varchar(255) NULL, + score int4 NOT NULL, + yhat_lower_upper json NULL, + CONSTRAINT metric_score_pkey PRIMARY KEY (clst_id, entity_id, entity_type, metric_id, sub_key, unixtime) +); + + +CREATE TABLE public.tenant_info_auth_resources ( + tenant_info_id varchar(255) NOT NULL, + auth_resources_id int8 NOT NULL, + CONSTRAINT tenant_info_auth_resources_pkey PRIMARY KEY (tenant_info_id, auth_resources_id), + CONSTRAINT uk_7s6l8e2c8gli4js43c4xoifcl UNIQUE (auth_resources_id) +); + + +-- public.tenant_info_auth_resources foreign keys + +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkkecsc13ydhwg8u05aumkqbnx1 FOREIGN KEY (tenant_info_id) REFERENCES public.tenant_info(id); +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkpvvec4ju3hsma6s1rtgvr4mf6 FOREIGN KEY (auth_resources_id) REFERENCES public.auth_resource3(id); \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_dml.psql b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_dml.psql new file mode 100644 index 0000000..e6335f3 --- /dev/null +++ b/ansible/01_old/roles/test/files/03-ddl-dml/postgres/postgres_insert_dml.psql @@ -0,0 +1,2380 @@ +INSERT INTO public.tenant_info (id, name, in_used, created_date, modified_date, contract_id) VALUES ('DEFAULT_TENANT', 'admin', true, now(), now(), 0); + +INSERT INTO public.auth_resource2 (id, access_type, 
name, parent_id, type) VALUES (-1, 4, 'null', NULL, 'null'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Infrastructure', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Workloads', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Services', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Diagnosis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Statistics & Analysis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Reports', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Settings', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Hosts', -1, 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Dashboards', -1 , 'menu'); +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Health Check', -1, 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Namespace', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Nodes', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Node 
Details', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Usage', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Persistent Volume', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Pods', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Cron Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Deploy List', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Structure', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id 
from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Troubleshooting', (select id from auth_resource2 where type='menu' and name='Diagnosis') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Performance Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Job History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Log Viewer', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Event Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 
(access_type, name, parent_id, type) VALUES (4, 'Alert Analysis', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Container Life Cycle', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Traces', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Used Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'User & Group', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alerts', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'General', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Metric Meta', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); 
+INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Notification', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Host Alerts', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'License', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Agent', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alias', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 
'Group', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'CloudMOA - Nodes Resource', NULL, 'dashboard'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Detail', NULL, 'dashboard'); + +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES(4, 'Check Script', (select id from auth_resource2 where type='menu' and name='Health Check'), 'menu'); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards', false, null); +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Namespace', false, null); +INSERT INTO public.auth_resource3 (name, 
is_deleted, tenant_id) VALUES ('menu|Infrastructure|Nodes', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Node Details', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Resource Usage', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Pods', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Jobs', false, null); +-- INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Cron Jobs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Deploy List', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Structure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis|Anomaly Score', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Performance Trends', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert History', false, null); +INSERT INTO
public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Anomaly Score', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Job History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Log Viewer', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Event Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Container Life Cycle', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Service Traces', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|User & Group', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|General', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Metric Meta', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Notification', false, null); +INSERT INTO public.auth_resource3 (name, 
is_deleted, tenant_id) VALUES ('menu|Settings|Host Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|License', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alias', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent Installation', false, NULL); + + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Group', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check|Check Script', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('userGroup|admin|default', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin|owner', false, 'DEFAULT_TENANT'); + +INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, 
user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('admin', NULL, true, NULL, 'admin', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin')); +INSERT INTO public.cloud_group (id, created_date, modified_date, name, description) VALUES ((select id from auth_resource3 where name='userGroup|admin|default'), now(), now(), 'default', '기본그룹정의'); + +--INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('owner', NULL, false, NULL, 'owner', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin|owner')); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +--INSERT INTO public.cloud_user_setting +--(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +--VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|CloudMOA - Nodes Resource', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|Service Detail', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('cluster|cloudmoa', false, 'DEFAULT_TENANT'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (select id from auth_resource3 where name='menu|Infrastructure'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (select id from auth_resource3 where name='menu|Infrastructure|Topology'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (select id from auth_resource3 where name='menu|Infrastructure|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (select id from auth_resource3 where name='menu|Infrastructure|Resource Usage'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (select id from auth_resource3 where name='menu|Infrastructure|Namespace'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (11, 'Overview', NULL, 0, 
'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", 
url, auth_resource3_id, scope_level) VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (30, 'Diagnosis', '05.Diagnosis', 4, NULL, (select id from auth_resource3 where name='menu|Diagnosis'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (31, 'Anomaly Score Detail', NULL, 0, 'anomalyScoreDiagnosis', (select id from auth_resource3 where name='menu|Diagnosis|Anomaly Score'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (43, 'Alert History', 
NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (62, 'Templates', NULL, 1, 'templateReport', (select id from auth_resource3 where name='menu|Reports|Templates'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2); + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (80, 'Hosts', '12.Hosts', 10, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (90, 'Settings', '08.Setting', 99, NULL, (select id from auth_resource3 where name='menu|Settings'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', (select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (99, 'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2); + +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0); + +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure'), 'owner'); 
+--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Resource Usage'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Namespace'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Nodes'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Node Details'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Deploy List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Workloads|Cron Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Pods'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Structure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Detail'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis|Anomaly Score'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, 
(SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Performance Trends'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Anomaly Score'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Job History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Log Viewer'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Event Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Container Life Cycle'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Service Traces'), 'owner'); +-- +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), 
now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Templates'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Templates'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|User & Group'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alerts'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Metric Meta'), 'owner'); +--INSERT INTO public.user_permission2 VALUES 
(nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|General'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Notification'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alias'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|License'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent Installation'), 'owner'); + +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cadvisor_version_info', 'cadvisor', 'A metric with a constant ''1'' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_periods_total', 'cadvisor', 'Number of elapsed enforcement period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('container_cpu_cfs_throttled_periods_total', 'cadvisor', 'Number of throttled period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_seconds_total', 'cadvisor', 'Total time duration the container has been throttled.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_load_average_10s', 'cadvisor', 'Value of container cpu load average over the last 10 seconds.', 'CPU', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_periods_total', 'cadvisor', 'Number of times processes of the cgroup have run on the cpu', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_seconds_total', 'cadvisor', 'Time duration the processes of the container have run on the CPU.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_runqueue_seconds_total', 'cadvisor', 'Time 
duration processes of the container have been waiting on a runqueue.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_system_seconds_total', 'cadvisor', 'Cumulative system cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_usage_seconds_total', 'cadvisor', 'Cumulative cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_user_seconds_total', 'cadvisor', 'Cumulative user cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_limit_bytes', 'cadvisor', 'Number of bytes that can be consumed by the container on this filesystem.', NULL, NULL, 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_last_seen', 'cadvisor', 'Last time a container was seen by the exporter', NULL, NULL, 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_bytes_total', 'cadvisor', 'Cumulative count of bytes received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while receiving', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_total', 'cadvisor', 'Cumulative count of packets received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_bytes_total', 'cadvisor', 'Cumulative count of bytes transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_errors_total', 'cadvisor', 'Cumulative count of errors encountered while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, 
diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_total', 'cadvisor', 'Cumulative count of packets transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_scrape_error', 'cadvisor', '1 if there was an error while getting container metrics, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_period', 'cadvisor', 'CPU period of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_quota', 'cadvisor', 'CPU quota of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_cache', 'cadvisor', 'Number of bytes of page cache memory.', 'Memory', 'LOAD', 'Container', 
'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failcnt', 'cadvisor', 'Number of memory usage hits limits', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failures_total', 'cadvisor', 'Cumulative count of memory allocation failures.', 'Memory', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_max_usage_bytes', 'cadvisor', 'Maximum memory usage recorded in bytes', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_rss', 'cadvisor', 'Size of RSS in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_swap', 'cadvisor', 'Container swap usage in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_memory_usage_bytes', 'cadvisor', 'Current memory usage in bytes, including all memory regardless of when it was accessed', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_working_set_bytes', 'cadvisor', 'Current working set in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_tcp_usage_total', 'cadvisor', 'tcp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'tcp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_udp_usage_total', 'cadvisor', 'udp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'udp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_shares', 'cadvisor', 'CPU share of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_limit_bytes', 'cadvisor', 'Memory limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_swap_limit_bytes', 'cadvisor', 'Memory swap limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_start_time_seconds', 'cadvisor', 'Start time of the container since unix epoch in seconds.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_tasks_state', 'cadvisor', 'Number of tasks in given state', NULL, NULL, 'Container', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds', 'prometheus', 'The HTTP request latencies in microseconds.', NULL, 'DURATION', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_sum', 'prometheus', '', NULL, 
NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_requests_total', 'prometheus', 'Total number of scrapes by HTTP status code.', NULL, 'ERROR', 'Node', 'counter', 'code,method', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_bytes_average', 'cloudwatch', 'Bytes read from all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', 
NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds', 'micrometer', 'Server Response in second', NULL, 'RATE', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_count', 'micrometer', 'the total number of requests.', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_sum', 'micrometer', 'the total time taken to serve the requests', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_max', 'micrometer', 'the max number of requests.', NULL, 'RATE', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_bytes_average', 'cloudwatch', 'Bytes written to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('jvm_classes_loaded', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_unloaded_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_live_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_max_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_allocated_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_promoted_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds', 'micrometer', 'jvm 
info', 'GC', 'LOAD', 'Process', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_count', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_max', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_sum', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_arp_entries', 'node_exporter', 'ARP entries by device', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_boot_time_seconds', 'node_exporter', 'Node boot time, in unixtime.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_context_switches_total', 'node_exporter', 'Total number of context switches.', 'OS', 'LOAD', 'Node', 'counter', 
NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_core_throttles_total', 'node_exporter', 'Number of times this cpu core has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'core', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_hertz', 'node_exporter', 'Current cpu thread frequency in hertz.', 'CPU', 'LOAD', 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_max_hertz', 'node_exporter', 'Maximum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_min_hertz', 'node_exporter', 'Minimum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_guest_seconds_total', 'node_exporter', 'Seconds the cpus spent in guests (VMs) for each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_cpu_package_throttles_total', 'node_exporter', 'Number of times this cpu package has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'package', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_seconds_total', 'node_exporter', 'Seconds the cpus spent in each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu,mode', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_entropy_available_bits', 'node_exporter', 'Bits of available entropy.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_exporter_build_info', 'node_exporter', 'A metric with a constant ''1'' value labeled by version, revision, branch, and goversion from which node_exporter was built.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_cpuutilization_average', 'cloudwatch', 'The percentage of allocated EC2 compute units that are currently in use on the instance.', 'CPU', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_ops_average', 'cloudwatch', 'Completed read 
operations from all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_ops_average', 'cloudwatch', 'Completed write operations to all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_bytes_average', 'cloudwatch', 'Bytes read from all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_bytes_average', 'cloudwatch', 'Bytes written to all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_in_average', 'cloudwatch', 'The number of bytes received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('aws_ec2_network_out_average', 'cloudwatch', 'The number of bytes sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_readonly', 'node_exporter', 'Filesystem read-only status.', NULL, NULL, 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_in_average', 'cloudwatch', 'The number of packets received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_forks_total', 'node_exporter', 'Total number of forks.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_chip_names', 'node_exporter', 'Annotation metric for human-readable chip names', 'CPU', 'LOAD', 'Node', 'gauge', 'chip', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_fan_rpm', 'node_exporter', 'Hardware monitor for fan revolutions per minute (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_pwm', 'node_exporter', 'Hardware monitor pwm element ', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_sensor_label', 'node_exporter', 'Label for given chip and sensor', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_celsius', 'node_exporter', 'Hardware monitor for temperature (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_alarm_celsius', 'node_exporter', 'Hardware monitor for temperature (crit_alarm)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_celsius', 'node_exporter', 'Hardware monitor for temperature (crit)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_hwmon_temp_max_celsius', 'node_exporter', 'Hardware monitor for temperature (max)', NULL, NULL, 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_intr_total', 'node_exporter', 'Total number of interrupts serviced.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_out_average', 'cloudwatch', 'The number of packets sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_ops_average', 'cloudwatch', 'Completed read operations from all Amazon EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_ops_average', 'cloudwatch', 'Completed write operations to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load1', 'node_exporter', '1m load 
average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load15', 'node_exporter', '15m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load5', 'node_exporter', '5m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_completed_total', 'node_exporter', 'The total number of reads completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_merged_total', 'node_exporter', 'The total number of reads merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_write_time_seconds_total', 'node_exporter', 'This is the total number of seconds spent by all writes.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_disk_writes_completed_total', 'node_exporter', 'The total number of writes completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_merged_total', 'node_exporter', 'The number of writes merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_written_bytes_total', 'node_exporter', 'The total number of bytes written successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries', 'node_exporter', 'Number of currently allocated flow entries for connection tracking.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries_limit', 'node_exporter', 'Maximum size of connection tracking table.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_duration_seconds', 'node_exporter', 'node_exporter: Duration of a collector scrape.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_success', 'node_exporter', 'node_exporter: Whether a collector succeeded.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_textfile_scrape_error', 'node_exporter', '1 if there was an error opening or reading a file, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_time_seconds', 'node_exporter', 'System time in seconds since epoch (1970).', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_estimated_error_seconds', 'node_exporter', 'Estimated error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_frequency_adjustment_ratio', 'node_exporter', 'Local clock frequency adjustment.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_timex_loop_time_constant', 'node_exporter', 'Phase-locked loop time constant.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_maxerror_seconds', 'node_exporter', 'Maximum error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_offset_seconds', 'node_exporter', 'Time offset in between local system and reference clock.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_calibration_total', 'node_exporter', 'Pulse per second count of calibration intervals.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_error_total', 'node_exporter', 'Pulse per second count of calibration errors.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_frequency_hertz', 'node_exporter', 'Pulse per second frequency.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_seconds', 'node_exporter', 'Pulse per second jitter.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_total', 'node_exporter', 'Pulse per second count of jitter limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_shift_seconds', 'node_exporter', 'Pulse per second interval duration.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_exceeded_total', 'node_exporter', 'Pulse per second count of stability limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_hertz', 'node_exporter', 'Pulse per second stability, average of recent frequency changes.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_status', 'node_exporter', 'Value of the status array bits.', NULL, 
NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_sync_status', 'node_exporter', 'Is clock synchronized to a reliable server (1 = yes, 0 = no).', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tai_offset_seconds', 'node_exporter', 'International Atomic Time (TAI) offset.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tick_seconds', 'node_exporter', 'Seconds between clock ticks.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_uname_info', 'node_exporter', 'Labeled system information as provided by the uname system call.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_oom_kill', 'node_exporter', '/proc/vmstat information field oom_kill.', NULL, 'ERROR', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('process_cpu_usage', 'micrometer', 'The "recent cpu usage" for the Java Virtual Machine process', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_uptime_seconds', 'micrometer', 'Process uptime in seconds.', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_count', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_max', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_sum', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('system_cpu_usage', 'micrometer', 'The "recent cpu usage" for the whole system', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_load_average_1m', 'micrometer', 'The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('up', 'prometheus', '1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed.', NULL, 'ERROR', 'Any', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('go_threads', 'prometheus', 'Number of OS threads created.', 'Thread', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes', 'prometheus', 'The HTTP request sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes', 'prometheus', 'The HTTP response sizes in bytes.', 'Network', 'LOAD', 'Node', 
'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_count', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_total_capacity_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_committed_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_max_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_daemon', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_live', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_peak', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_now', 'node_exporter', 'The number of I/Os currently in progress.', 'Disk', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_seconds_total', 'node_exporter', 'Total seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_weighted_seconds_total', 'node_exporter', 'The weighted # of seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_bytes_total', 'node_exporter', 'The total number of bytes read successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_time_seconds_total', 'node_exporter', 'The total number of seconds spent by all reads.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_allocated', 'node_exporter', 'File descriptor statistics: allocated.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_maximum', 'node_exporter', 'File descriptor statistics: maximum.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_connections_total', 'node_exporter', 'The total number of connections made.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_ipvs_incoming_bytes_total', 'node_exporter', 'The total amount of incoming data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_packets_total', 'node_exporter', 'The total number of incoming packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_bytes_total', 'node_exporter', 'The total amount of outgoing data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_packets_total', 'node_exporter', 'The total number of outgoing packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_anon_bytes', 'node_exporter', 'Memory information field Active_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_bytes', 'node_exporter', 'Memory information field Active_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_file_bytes', 'node_exporter', 'Memory information field Active_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonHugePages_bytes', 'node_exporter', 'Memory information field AnonHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonPages_bytes', 'node_exporter', 'Memory information field AnonPages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Bounce_bytes', 'node_exporter', 'Memory information field Bounce_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Buffers_bytes', 'node_exporter', 'Memory information field Buffers_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Cached_bytes', 'node_exporter', 'Memory 
information field Cached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaFree_bytes', 'node_exporter', 'Memory information field CmaFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaTotal_bytes', 'node_exporter', 'Memory information field CmaTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CommitLimit_bytes', 'node_exporter', 'Memory information field CommitLimit_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Committed_AS_bytes', 'node_exporter', 'Memory information field Committed_AS_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap1G_bytes', 'node_exporter', 'Memory information field DirectMap1G_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap2M_bytes', 'node_exporter', 'Memory information field DirectMap2M_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap4k_bytes', 'node_exporter', 'Memory information field DirectMap4k_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Dirty_bytes', 'node_exporter', 'Memory information field Dirty_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HardwareCorrupted_bytes', 'node_exporter', 'Memory information field HardwareCorrupted_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Free', 'node_exporter', 'Memory information field HugePages_Free.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Rsvd', 'node_exporter', 'Memory information field HugePages_Rsvd.', 'Memory', 
'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Surp', 'node_exporter', 'Memory information field HugePages_Surp.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Total', 'node_exporter', 'Memory information field HugePages_Total.', 'Memory', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Hugepagesize_bytes', 'node_exporter', 'Memory information field Hugepagesize_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_anon_bytes', 'node_exporter', 'Memory information field Inactive_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_bytes', 'node_exporter', 'Memory information field Inactive_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_file_bytes', 'node_exporter', 'Memory information field Inactive_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_KernelStack_bytes', 'node_exporter', 'Memory information field KernelStack_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mapped_bytes', 'node_exporter', 'Memory information field Mapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemAvailable_bytes', 'node_exporter', 'Memory information field MemAvailable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemFree_bytes', 'node_exporter', 'Memory information field MemFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemTotal_bytes', 'node_exporter', 'Memory information field MemTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mlocked_bytes', 'node_exporter', 'Memory information field Mlocked_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_NFS_Unstable_bytes', 'node_exporter', 'Memory information field NFS_Unstable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_PageTables_bytes', 'node_exporter', 'Memory information field PageTables_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Shmem_bytes', 'node_exporter', 'Memory information field Shmem_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemHugePages_bytes', 'node_exporter', 'Memory information field ShmemHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_memory_ShmemPmdMapped_bytes', 'node_exporter', 'Memory information field ShmemPmdMapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Slab_bytes', 'node_exporter', 'Memory information field Slab_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SReclaimable_bytes', 'node_exporter', 'Memory information field SReclaimable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SUnreclaim_bytes', 'node_exporter', 'Memory information field SUnreclaim_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapCached_bytes', 'node_exporter', 'Memory information field SwapCached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapFree_bytes', 'node_exporter', 'Memory information field SwapFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapTotal_bytes', 'node_exporter', 'Memory information field SwapTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Unevictable_bytes', 'node_exporter', 'Memory information field Unevictable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocChunk_bytes', 'node_exporter', 'Memory information field VmallocChunk_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocTotal_bytes', 'node_exporter', 'Memory information field VmallocTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocUsed_bytes', 'node_exporter', 'Memory information field VmallocUsed_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Writeback_bytes', 
'node_exporter', 'Memory information field Writeback_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_WritebackTmp_bytes', 'node_exporter', 'Memory information field WritebackTmp_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InErrors', 'node_exporter', 'Statistic IcmpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InMsgs', 'node_exporter', 'Statistic IcmpInMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_OutMsgs', 'node_exporter', 'Statistic IcmpOutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InErrors', 'node_exporter', 'Statistic Icmp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InMsgs', 'node_exporter', 'Statistic Icmp6InMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_OutMsgs', 'node_exporter', 'Statistic Icmp6OutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip_Forwarding', 'node_exporter', 'Statistic IpForwarding.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_InOctets', 'node_exporter', 'Statistic Ip6InOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_OutOctets', 'node_exporter', 'Statistic Ip6OutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_InOctets', 'node_exporter', 'Statistic IpExtInOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_OutOctets', 'node_exporter', 'Statistic IpExtOutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_ActiveOpens', 'node_exporter', 'Statistic TcpActiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_CurrEstab', 'node_exporter', 'Statistic TcpCurrEstab.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_InErrs', 'node_exporter', 'Statistic TcpInErrs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_PassiveOpens', 'node_exporter', 'Statistic TcpPassiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_RetransSegs', 'node_exporter', 'Statistic TcpRetransSegs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenDrops', 'node_exporter', 'Statistic TcpExtListenDrops.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenOverflows', 'node_exporter', 'Statistic TcpExtListenOverflows.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesFailed', 'node_exporter', 'Statistic TcpExtSyncookiesFailed.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesRecv', 'node_exporter', 'Statistic TcpExtSyncookiesRecv.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesSent', 'node_exporter', 'Statistic TcpExtSyncookiesSent.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InDatagrams', 
'node_exporter', 'Statistic UdpInDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InErrors', 'node_exporter', 'Statistic UdpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_NoPorts', 'node_exporter', 'Statistic UdpNoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_OutDatagrams', 'node_exporter', 'Statistic UdpOutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InDatagrams', 'node_exporter', 'Statistic Udp6InDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InErrors', 'node_exporter', 'Statistic Udp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('node_netstat_Udp6_NoPorts', 'node_exporter', 'Statistic Udp6NoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_OutDatagrams', 'node_exporter', 'Statistic Udp6OutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite_InErrors', 'node_exporter', 'Statistic UdpLiteInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite6_InErrors', 'node_exporter', 'Statistic UdpLite6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_bytes_total', 'node_exporter', 'Network device statistic receive_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_compressed_total', 'node_exporter', 'Network device statistic receive_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_drop_total', 'node_exporter', 'Network device statistic receive_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_errs_total', 'node_exporter', 'Network device statistic receive_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_fifo_total', 'node_exporter', 'Network device statistic receive_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_frame_total', 'node_exporter', 'Network device statistic receive_frame.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_multicast_total', 'node_exporter', 'Network device statistic receive_multicast.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_network_receive_packets_total', 'node_exporter', 'Network device statistic receive_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_bytes_total', 'node_exporter', 'Network device statistic transmit_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_carrier_total', 'node_exporter', 'Network device statistic transmit_carrier.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_colls_total', 'node_exporter', 'Network device statistic transmit_colls.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_compressed_total', 'node_exporter', 'Network device statistic transmit_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_drop_total', 'node_exporter', 'Network device statistic transmit_drop.', 'Network', 'LOAD', 
'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_errs_total', 'node_exporter', 'Network device statistic transmit_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_fifo_total', 'node_exporter', 'Network device statistic transmit_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_packets_total', 'node_exporter', 'Network device statistic transmit_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_blocked', 'node_exporter', 'Number of processes blocked waiting for I/O to complete.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_running', 'node_exporter', 'Number of processes in runnable state.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_inuse', 'node_exporter', 'Number of FRAG sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_memory', 'node_exporter', 'Number of FRAG sockets in state memory.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_RAW_inuse', 'node_exporter', 'Number of RAW sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_sockets_used', 'node_exporter', 'Number of sockets sockets in state used.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_alloc', 'node_exporter', 'Number of TCP sockets in state alloc.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_inuse', 'node_exporter', 'Number of TCP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem', 'node_exporter', 'Number of TCP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem_bytes', 'node_exporter', 'Number of TCP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_orphan', 'node_exporter', 'Number of TCP sockets in state orphan.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_tw', 'node_exporter', 'Number of TCP sockets in state tw.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_inuse', 'node_exporter', 'Number of UDP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem', 'node_exporter', 'Number of UDP sockets in 
state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem_bytes', 'node_exporter', 'Number of UDP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDPLITE_inuse', 'node_exporter', 'Number of UDPLITE sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_tcp_connection_states', 'node_exporter', 'Number of connection states.', 'Network', 'LOAD', 'Node', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgfault', 'node_exporter', '/proc/vmstat information field pgfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgmajfault', 'node_exporter', '/proc/vmstat information field pgmajfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('node_vmstat_pgpgin', 'node_exporter', '/proc/vmstat information field pgpgin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgout', 'node_exporter', '/proc/vmstat information field pgpgout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpin', 'node_exporter', '/proc/vmstat information field pswpin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpout', 'node_exporter', '/proc/vmstat information field pswpout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_files_open', 'micrometer', 'The open file descriptor count', 'File', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_open_fds', 'micrometer', 'Number of open file descriptors.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_resident_memory_bytes', 'micrometer', 'Resident memory size in bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_virtual_memory_bytes', 'micrometer', '-', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_free', 'cadvisor', 'Number of available Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_total', 'cadvisor', 'Number of Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_current', 'cadvisor', 'Number of I/Os currently in progress', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_seconds_total', 'cadvisor', 'Cumulative count of seconds spent doing I/Os', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_weighted_seconds_total', 'cadvisor', 'Cumulative weighted I/O time in seconds', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_read_seconds_total', 'cadvisor', 'Cumulative count of seconds spent reading', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_bytes_total', 'cadvisor', 'Cumulative count of bytes read', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_merged_total', 'cadvisor', 'Cumulative count of reads merged', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_total', 'cadvisor', 'Cumulative count of reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('container_fs_sector_reads_total', 'cadvisor', 'Cumulative count of sector reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_writes_total', 'cadvisor', 'Cumulative count of sector writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_usage_bytes', 'cadvisor', 'Number of bytes that are consumed by the container on this filesystem.', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_write_seconds_total', 'cadvisor', 'Cumulative count of seconds spent writing', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_bytes_total', 'cadvisor', 'Cumulative count of bytes written', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_total', 'cadvisor', 'Cumulative count of writes completed', 'Filesystem', 'LOAD', 
'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_avail_bytes', 'node_exporter', 'Filesystem space available to non-root users in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_device_error', 'node_exporter', 'Whether an error occurred while getting statistics for the given device.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files', 'node_exporter', 'Filesystem total file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files_free', 'node_exporter', 'Filesystem total free file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_free_bytes', 'node_exporter', 'Filesystem free space in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_size_bytes', 'node_exporter', 'Filesystem size in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hitrate', 'cassandra_exporter', 'All time cache hit rate', 'Cache', 'LOAD', 'Cassandra', 'gauge', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hits_count', 'cassandra_exporter', 'Total number of cache hits', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_requests_count', 'cassandra_exporter', 'Total number of cache requests', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_client_connectednativeclients', 'cassandra_exporter', 'Number of clients connected to this nodes native protocol server', 'Connection', 'LOAD', 'Cassandra', 'gauge', NULL, '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_failures_count', 'cassandra_exporter', 'Number of transaction failures encountered', 'Request', 'LOAD', 'Cassandra', 
'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_latency_seconds_count', 'cassandra_exporter', 'Number of client requests latency seconds', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_timeouts_count', 'cassandra_exporter', 'Number of timeouts encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_unavailables_count', 'cassandra_exporter', 'Number of unavailable exceptions encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_completedtasks', 'cassandra_exporter', 'Total number of commit log messages written', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_totalcommitlogsize', 'cassandra_exporter', 'Current size, in bytes, used by all the commit log segments', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds', 'cassandra_exporter', 'Local range scan latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds_count', 'cassandra_exporter', 'Local range scan count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds', 'cassandra_exporter', 'Local read latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds_count', 'cassandra_exporter', 'Local read count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused', 'cassandra_exporter', 'Total disk space used belonging to this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('cassandra_keyspace_writelatency_seconds', 'cassandra_exporter', 'Local write latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds_count', 'cassandra_exporter', 'Local write count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_activetasks', 'cassandra_exporter', 'Number of tasks being actively worked on', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_completedtasks', 'cassandra_exporter', 'Number of tasks completed', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_pendingtasks', 'cassandra_exporter', 'Number of queued tasks queued up', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_totalblockedtasks_count', 'cassandra_exporter', 'Number of tasks that were blocked due to queue saturation', 'Task', 
'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cloudwatch_requests_total', 'cloudwatch', 'API requests made to CloudWatch', 'API', 'LOAD', 'AWS/Usage', 'counter', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_count', 'imxc_api_server', 'the number of error counts in 5s', NULL, 'ERROR', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_total', 'imxc_api_server', 'the total number of errors', NULL, 'ERROR', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_request_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_count', 'imxc_api_server', 'the number of requests counts in 5s', NULL, 'LOAD', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('imxc_service_requests_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'gauge', 'protocol', '2019-12-10 11:22:00', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_total', 'imxc_api_server', 'the total number of requests', NULL, 'LOAD', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_connections', 'mongodb_exporter', 'The number of incoming connections from clients to the database server', 'Connection', 'LOAD', 'MongoDB', 'gauge', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_client', 'mongodb_exporter', 'The number of the active client connections performing read or write operations', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_current_queue', 'mongodb_exporter', 'The number of operations that are currently queued and waiting for the read or write lock', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_instance_uptime_seconds', 'mongodb_exporter', 'The number of seconds that the current MongoDB process has been 
active', 'Server', 'DURATION', 'MongoDB', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_memory', 'mongodb_exporter', 'The amount of memory, in mebibyte (MiB), currently used by the database process', 'Memory', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_metrics_document_total', 'mongodb_exporter', 'The total number of documents processed', 'Row', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_network_bytes_total', 'mongodb_exporter', 'The number of bytes that reflects the amount of network traffic', 'Network', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_op_counters_total', 'mongodb_exporter', 'The total number of operations since the mongod instance last started', 'Request', 'LOAD', 'MongoDB', 'counter', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_aborted_connects', 'mysqld_exporter', 'The number of failed attempts to connect to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_received', 'mysqld_exporter', 'The number of bytes received from all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_sent', 'mysqld_exporter', 'The number of bytes sent to all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_commands_total', 'mysqld_exporter', 'The number of times each XXX command has been executed', 'Request', 'LOAD', 'MySQL', 'counter', 'command', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_connections', 'mysqld_exporter', 'The number of connection attempts (successful or not) to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests', 'mysqld_exporter', 'The number of logical read requests', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('mysql_global_status_innodb_buffer_pool_write_requests', 'mysqld_exporter', 'The number of writes done to the InnoDB buffer pool', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_read', 'mysqld_exporter', 'The amount of data read since the server was started (in bytes)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_reads', 'mysqld_exporter', 'The total number of data reads (OS file reads)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_writes', 'mysqld_exporter', 'The total number of data writes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_written', 'mysqld_exporter', 'The amount of data written so far, in bytes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_write_requests', 'mysqld_exporter', 'The number of write requests for the InnoDB redo log', 'Log', 'LOAD', 'MySQL', 'counter', 
'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_writes', 'mysqld_exporter', 'The number of physical writes to the InnoDB redo log file', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_os_log_written', 'mysqld_exporter', 'The number of bytes written to the InnoDB redo log files', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits', 'mysqld_exporter', 'The number of row locks currently being waited for by operations on InnoDB tables', 'Lock', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_time', 'mysqld_exporter', 'The total time spent in acquiring row locks for InnoDB tables, in milliseconds', 'Lock', 'DURATION', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits', 'mysqld_exporter', 'The number of times operations on InnoDB tables had to wait for a row lock', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 
16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_ops_total', 'mysqld_exporter', 'The number of rows operated in InnoDB tables', 'Row', 'LOAD', 'MySQL', 'counter', 'operation', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_table_locks_immediate', 'mysqld_exporter', 'The number of times that a request for a table lock could be granted immediately', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_connected', 'mysqld_exporter', 'The number of currently open connections', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_running', 'mysqld_exporter', 'The number of threads that are not sleeping', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_uptime', 'mysqld_exporter', 'The number of seconds that the server has been up', 'Server', 'DURATION', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('mysql_up', 'mysqld_exporter', 'Whether the last scrape of metrics from MySQL was able to connect to the server', 'NULL', 'ERROR', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_locks_count', 'postgres_exporter', 'Number of locks', 'Lock', 'LOAD', 'PostgreSQL', 'gauge', 'datname,mode', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_activity_count', 'postgres_exporter', 'number of connections in this state', 'Connection', 'LOAD', 'PostgreSQL', 'gauge', 'datname,state', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_read_time', 'postgres_exporter', 'Time spent reading data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_write_time', 'postgres_exporter', 'Time spent writing data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_hit', 'postgres_exporter', 'Number of times disk blocks were found already in the buffer cache', 
'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_read', 'postgres_exporter', 'Number of disk blocks read in this database', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_bytes', 'postgres_exporter', 'Total amount of data written to temporary files by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_files', 'postgres_exporter', 'Number of temporary files created by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_deleted', 'postgres_exporter', 'Number of rows deleted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_fetched', 'postgres_exporter', 'Number of rows fetched by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_inserted', 'postgres_exporter', 'Number of rows inserted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_returned', 'postgres_exporter', 'Number of rows returned by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_updated', 'postgres_exporter', 'Number of rows updated by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_commit', 'postgres_exporter', 'Number of transactions in this database that have been committed', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_rollback', 'postgres_exporter', 'Number of transactions in this database that have been rolled back', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('pg_up', 'postgres_exporter', 'Whether the last scrape of metrics from PostgreSQL was able to connect to the server', 'NULL', 'ERROR', 'PostgreSQL', 'gauge', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816000, '2019-08-19 06:14:22.616', '2019-08-19 06:14:22.616', false, 4, (select id from auth_resource2 where type='menu' and name='Infrastructure' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816001, '2019-08-19 06:14:22.635', '2019-08-19 06:14:22.635', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816002, '2019-08-19 06:14:22.638', '2019-08-19 06:14:22.638', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816003, '2019-08-19 06:14:22.64', '2019-08-19 06:14:22.64', false, 4, (select id from auth_resource2 where type='menu' and name='Namespace' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816004, '2019-08-19 06:14:22.643', '2019-08-19 06:14:22.643', false, 4, (select id from auth_resource2 where type='menu' and name='Nodes' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) 
VALUES (3816005, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Node Details' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816006, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Resource Usage' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816009, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Persistent Volume' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816100, '2019-08-19 06:14:22.619', '2019-08-19 06:14:22.619', false, 4, (select id from auth_resource2 where type='menu' and name='Workloads' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816105, '2019-08-19 06:14:22.657', '2019-08-19 06:14:22.657', false, 4, (select id from auth_resource2 where type='menu' and name='Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816106, '2019-08-19 06:14:22.66', '2019-08-19 06:14:22.66', false, 4, (select id from auth_resource2 where type='menu' and name='Cron Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816107, '2019-08-19 06:14:22.646', '2019-08-19 06:14:22.646', false, 4, (select id from auth_resource2 where type='menu' and name='Pods' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, 
auth_resource_id, user_id) VALUES (3816200, '2019-08-19 06:14:22.621', '2019-08-19 06:14:22.621', false, 4, (select id from auth_resource2 where type='menu' and name='Services' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816201, '2019-08-19 06:14:22.698', '2019-08-19 06:14:22.698', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816202, '2019-08-19 06:14:22.728', '2019-08-19 06:14:22.728', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816203, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816300, '2019-08-19 06:14:22.624', '2019-08-19 06:14:22.624', false, 4, (select id from auth_resource2 where type='menu' and name='Diagnosis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816301, '2019-08-19 06:14:22.705', '2019-08-19 06:14:22.705', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Diagnosis') ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816309, '2019-08-19 06:14:22.668', '2019-08-19 06:14:22.668', false, 4, (select id from auth_resource2 where type='menu' and name='Troubleshooting' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816400, '2019-08-19 06:14:22.627', '2019-08-19 06:14:22.627', false, 4, (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816401, '2019-08-19 06:14:22.671', '2019-08-19 06:14:22.671', false, 4, (select id from auth_resource2 where type='menu' and name='Performance Trends' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816402, '2019-08-19 06:14:22.731', '2019-08-19 06:14:22.731', false, 4, (select id from auth_resource2 where type='menu' and name='Alert Analysis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816403, '2019-08-19 06:14:22.674', '2019-08-19 06:14:22.674', false, 4, (select id from auth_resource2 where type='menu' and name='Alert History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816404, '2019-08-19 06:14:22.677', '2019-08-19 06:14:22.677', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816405, 
'2019-08-19 06:14:22.679', '2019-08-19 06:14:22.679', false, 4, (select id from auth_resource2 where type='menu' and name='Job History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816406, '2019-08-19 06:14:22.685', '2019-08-19 06:14:22.685', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816407, '2019-08-19 06:14:22.682', '2019-08-19 06:14:22.682', false, 4, (select id from auth_resource2 where type='menu' and name='Log Viewer' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816408, '2019-08-19 06:14:22.725', '2019-08-19 06:14:22.725', false, 4, (select id from auth_resource2 where type='menu' and name='Event Logs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816409, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Container Life Cycle' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816500, '2019-08-19 06:14:22.629', '2019-08-19 06:14:22.629', false, 4, (select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816501, '2019-08-19 06:14:22.734', 
'2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816502, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816550, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Dashboards' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816551, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816552, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816700, '2019-08-19 06:14:22.632', '2019-08-19 06:14:22.632', false, 4, (select id from auth_resource2 where type='menu' and name='Settings' ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816701, '2019-08-19 06:14:22.687', '2019-08-19 06:14:22.687', false, 4, (select id from auth_resource2 where type='menu' and name='User & Group' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816702, '2019-08-19 06:14:22.69', '2019-08-19 06:14:22.69', false, 4, (select id from auth_resource2 where type='menu' and name='Alert' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816703, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Host Alerts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816704, '2019-08-19 06:14:22.693', '2019-08-19 06:14:22.693', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Settings' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816706, '2019-08-19 06:14:22.717', '2019-08-19 06:14:22.717', false, 4, (select id from auth_resource2 where type='menu' and name='Metric Meta' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816707, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='Notification' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816708, '2019-08-19 06:14:22.696', '2019-08-19 
06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='General' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816709, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='License' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816800, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Hosts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816801, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816802, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816803, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='List' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816804, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id 
from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816805, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Group' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); + + + + +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (97, '2019-04-02 18:07:31.319', '2019-04-02 18:07:31.319', 'NODE CPU 사용', '(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m])) * 100))', 'Node CPU Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id }} CPU 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (1, '2019-04-15 02:26:13.826', '2019-04-15 02:26:24.02', 'NODE Disk 사용', '(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', {filter} }))) * 100', 'Node Disk Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Disk 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (119, '2019-04-02 18:08:50.17', '2019-04-02 18:08:50.17', 'NODE Memory 사용', '(1- ((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node''})) * 100', 'Node Memory Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Memory 사용률이 {threshold}%를 초과했습니다. 현재값 : {{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (2, '2019-04-15 05:27:56.544', '2019-04-15 05:27:59.924', 'Container CPU 사용', 'sum (rate (container_cpu_usage_seconds_total{ {filter} }[1m])) by (xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id) * 100', 'Container CPU Usage', 'controller', 'Cluster:{{$labels.xm_clst_id }} POD:{{$labels.xm_pod_id}} CPU 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_user','Container CPU User (%)','Container CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_working_set_bytes','Container Memory Working Set (GiB)','Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_working_set_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_io_seconds','Host io Disk seconds','Host disk io seconds','sum by (instance) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Disk IO Seconds:{{humanize $value}}|{threshold}.','2020-03-23 04:08:37.359','2020-03-23 04:08:37.359'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_write_byte','host disk R/W byte','host disk R/W byte','sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Read/Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2020-03-24 05:21:53.915','2020-03-24 05:24:52.674'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_free','Host Memory Free (GiB)','Memory information field MemFree_bytes','(node_memory_MemAvailable_bytes{{filter}} or (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:18.977','2020-03-23 04:08:18.977'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_sent','Number of Bytes Sent','The number of bytes sent to all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_sent[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Sent:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_namespace','Containe memory sum by namespace','Containe memory sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','memory','Namespace',NULL,false,false,'Container memory sum by namespace','2020-07-03 04:31:10.079','2020-07-03 08:38:17.034'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_count','Node Count','node count','count by(xm_clst_id, xm_namespace,xm_node_id) (up{{filter}})','Node','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} NODE:{{$labels.xm_node_id}} Node Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_restart_count','Container Restart Count','container restart count group by namespace','sum by(xm_clst_id, xm_namespace, pod_name ) (increase(imxc_kubernetes_container_restart_count{{filter}}[10s]))','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container Restart Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_usage','Node CPU Usage (%)','NODE CPU Usage','(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0) * 100)))','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency_device','Node Disk Read Latency per Device (ms)','Node Disk Read Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage_per_device','Node Filesystem Usage per device (%)','NODE Filesystem Usage per Device','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_memory_usage','Node Memory Usage (%)','Node Memory Usage','sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_tablespace_size','Tablespace Size (GiB)','Generic counter metric of tablespaces bytes in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, tablespace, type) (oracledb_tablespace_bytes) / 1073741824','Tablespace','OracleDB','tablespace, type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Tablespace Size:{{humanize $value}}GiB|{threshold}GiB.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_allocated_size','Allocated Memory (MiB)','The total amount of memory that the Redis allocator allocated','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_allocated_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Allocated Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_kubernetes_event_count','Cluster events count','Kubernetes Namespace Events count','sum by (xm_clst_id, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Event Count:{{humanize $value}}|{threshold}.','2019-09-26 05:33:37.000','2020-04-27 05:38:47.804'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_limit','cluster_memory_limit (Gib)','Total container limit size in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Limits:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_total_count','Cluster Pod Total Count','Cluster Pod Total Count','sum by (xm_clst_id) (imxc_kubernetes_controller_counts{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Total Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_free','Host Swap Memory Free','Host Swap Free','node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:24.594','2020-03-23 04:08:24.594'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_context_switch_count','Host Context','Total number of context switches.','sum by (instance) (node_context_switches_total{{filter}})','CPU','Host',NULL,false,false,'None','2020-03-23 04:08:15.000','2020-03-23 04:08:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_used','Host system Filesystem used','Host File system used','sum by (instance) (node_filesystem_size_bytes{{filter}}-node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:30.407','2020-03-23 04:08:30.407'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_io','Node Disk I/O','Total seconds spent doing I/Os','avg by (xm_clst_id, xm_node_id) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:55.992'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage','Container Filesystem Usage (%)','Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_reads','Container Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_namespace','Container cpu sum by namespace','Container cpu sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Namespace',NULL,false,false,'.','2020-05-30 08:30:10.158','2020-06-09 02:00:50.856'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size','Node Filesystem Available Size (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', 
{filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_running_count','Node Pod Running Count','Node Pod Running Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Running Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-06 08:02:40.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_user','Pod CPU User (%)','Pod CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_reads','Pod Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Read Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_max_usage_bytes','Pod Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_max_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Max Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_receive','Pod Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Receive:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hits_count','Total number of cache hits (count/s)','Total number of cache hits','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_hits_count{{filter}}[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Counts per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:24:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_clientrequest_failures_count','Number of transaction failures encountered','Number of transaction failures encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_failures_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Failure Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_connections_and_tasks','Cassandra connections & tasks','cassandra connections & tasks','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "data_type", "Active tasks", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "data_type", "Pending tasks", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "data_type", "Client connections", "", "") )','Connection','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Connections and Tasks:{{humanize $value}}|{threshold}.','2020-01-02 09:11:48.000','2020-02-13 01:24:51.522'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_transmit','Pod Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Transmit:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 
03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_request','cluster_memory_request (Gib)','Total container memory request in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_count','Local read count (count/s)','Local read count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_readlatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_cpu{{filter}})','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Cluster CPU Capacity Cores:{{humanize $value}}|{threshold}.','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_alerts_received_count','Cluster alerts received count','Alert count by cluster','sum by (xm_clst_id, level) (ceil(increase(imxc_alerts_received_count_total{status=''firing'', 
{filter}}[10m])))','Alert','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Alert Received Counts:{{humanize $value}}|{threshold}.','2019-08-23 04:41:49.000','2020-04-28 08:09:09.429'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_throttled_time','Container CPU Throttled Time','container cpu_throttled time','sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) (increase(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="", {filter}}[10s]))','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hitrate','All time cache hit rate','All time cache hit rate','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (cassandra_cache_hitrate {{filter}} * 100)','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-12-13 01:19:54.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_read_bytes','Bytes Read from All Instance Store Volumes (KiB)','Bytes read from all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_read_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_write_bytes','Bytes Written to All Instance Store Volumes (KiB)','Bytes written to all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_write_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebswrite_bytes','Bytes written to all EBS volumes (KiB)','Bytes written to all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebswrite_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_requests_count','Total number of cache requests (count/s)','Total number of cache requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_requests_count[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_keyspace_write_latency','Local write latency (ms)','Local write latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_usage','Cluster Memory Usage (%)','All Nodes Memory Usage in cluster.','(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100','Memory','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-07-18 06:12:22.000','2020-04-22 04:59:14.251'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections_metrics_created_total','Incoming Connections Created','Count of all incoming connections created to the server (This number includes connections that have since closed)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_connections_metrics_created_total[1m]))','Connection','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Incoming Connections Created Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_disk_io','MySQL Disk I/O','MySQL Disk I/O','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_data_read[1m]), "data_type", "read", "", "") or +label_replace(rate(mysql_global_status_innodb_data_written[1m]), "data_type", "written", "", ""))','Disk','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} SVC:{{$labels.xm_service_name}} Mysql Disk IO:{{humanize $value}}|{threshold}.','2019-12-05 08:48:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_capacity_count','Cluster Pod Capacity Count','Cluster Pod Capacity Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Capacity Pod Counts:{{humanize $value}}|{threshold}.','2019-08-27 04:45:52.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_kubernetes_event_count','Namespace events count','Kubernetes Namespace Events count','sum by (xm_clst_id, xm_namespace, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Events:{{humanize $value}}|{threshold}.','2019-09-24 06:42:09.000','2019-09-24 06:42:34.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_capacity_cores','node_cpu_capacity_cores','node_cpu_capacity_cores','imxc_kubernetes_node_resource_capacity_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_allocatable_cores','node_cpu_allocatable_cores','node_cpu_allocatable_cores','imxc_kubernetes_node_resource_allocatable_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_capacity_count','Node Pod Capacity Count','Node Pod Capacity Count','imxc_kubernetes_node_resource_capacity_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Capacity Count of Pods:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_allocatable','node_memory_allocatable (Gib)','imxc_kubernetes_node_resource_allocatable_memory in GiB','imxc_kubernetes_node_resource_allocatable_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_limit','node_memory_limit (Gib)','Total container memory limit for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 
1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_readwritelatency_seconds','Cassandra Read/Write Latency (ms)','Cassandra Read/Write Latency (ms)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) or (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Keyspace Readwritelatency Seconds:{{humanize $value}}ms|{threshold}ms.','2019-10-23 01:46:07.000','2019-11-05 09:03:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_usage','Cluster CPU Usage (%)','All Nodes CPU Usage in cluster.','(100 - (avg by (xm_clst_id)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0)) * 100))','CPU','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-07-18 05:54:39.000','2020-04-22 04:59:14.253'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_received','Number of Bytes Received','The number of bytes received from all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_received[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Received:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 
16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_request','node_memory_request (Gib)','Total container memory request in GiB for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_tasks','Number of tasks','Number of tasks','sum by (task_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "task_type", "active", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "task_type", "pending", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "task_type", "connected", "", "") )','Task','Cassandra','task_type',true,false,'Number of tasks','2019-10-24 01:34:25.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_latency_seconds','Local latency seconds','Local latency seconds','sum by(type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(cassandra_keyspace_readlatency_seconds{quantile=''0.99'', {filter}}, "type", "read", "", "") or +label_replace(cassandra_keyspace_writelatency_seconds{quantile=''0.99'', {filter}}, "type", "write", "", "")) * 1000','Disk','Cassandra',NULL,true,true,'Local latency seconds','2019-10-24 02:14:45.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_concurrency','Wait-Time - Concurrency','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_concurrency[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Concurrency:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_pendingtasks','Number of queued tasks queued up','Number of queued tasks queued up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_pendingtasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Active Task:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_ready_count','Cluster Pod Ready Count','Cluster Pod Ready Count','sum by (xm_clst_id) (imxc_kubernetes_controller_ready{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Ready Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_allocatable_count','Node Pod Allocatable Count','Node Pod Allocatable Count','imxc_kubernetes_node_resource_allocatable_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} 
Allocatable Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_conatiner_count','Container Type Sparselog Count','Container-type sparse log count by xm_clst_id, xm_namespace, xm_node_id, xm_pod_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_namespace, xm_node_id, xm_pod_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Pod",{filter}}[1m])))','SparseLog','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_connected','Number of Open Connections','The number of currently open connections','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_connected)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Open Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebsread_bytes','Bytes read from all EBS volumes (KiB)','Bytes read from all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebsread_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 
17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_cpu_usage','Namespace CPU Usage (%)','CPU Usage by namespace','sum by (xm_clst_id,xm_entity_type,xm_namespace) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'', {filter}}[1m])) * 100','CPU','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 01:06:05.000','2019-08-23 01:06:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_memory_usage','Namespace memory usage (Gib)','Memory usage by namespace in bytes / 1073741824','sum by (xm_clst_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'', {filter}}) / 1073741824','Memory','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 01:21:31.000','2019-08-23 01:21:31.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_free','Node Memory Free (GiB)','Memory information field MemFree_bytes / 1073741824','node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_cached','Node Swap Memory Cached (GiB)','Memory information field SwapCached_bytes / 
1073741824','node_memory_SwapCached_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Cached Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_active_size','Active Memory (MiB)','The total amount of active memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_active_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Active Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_up','MySQL Up Count','Whether the last scrape of metrics from MySQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_up)','Instance','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Up counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_up','Oracle DB Up Count','Whether the Oracle database server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_up)','Instance','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle DB Up Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_process_count','Process Count','Gauge metric with count of processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_process_count)','Process','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Process Count Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_locks_count','Number of Locks','Number of locks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, mode) (pg_locks_count)','Lock','PostgreSQL','datname,mode',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Lock Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_updated','Number of Rows Updated','Number of rows updated by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_updated[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Updated Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_deleted','Number of Rows Deleted','Number of rows deleted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_tup_deleted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Deleted Row counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_files','Number of Temporary Files Created','Number of temporary files created by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_files[1m]))','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load15','Node CPU Load 15m Average','Node CPU 15m load average','node_load15{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 15m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:27:39.000','2019-05-15 08:27:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_throttling','Node CPU Throttling','Number of times this cpu package has been throttled.','increase(node_cpu_package_throttles_total{xm_entity_type=''Node'',{filter}}[1m])','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Throttling Counts:{{humanize $value}}|{threshold}.','2019-05-15 08:29:24.000','2019-05-15 08:29:24.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_usage','Pod CPU Usage (%)','Pod CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_system','Pod CPU System (%)','Pod CPU Usage (System)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage_bytes','Pod Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Used Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_limit_bytes','Pod Filesystem Limit Bytes (GiB)','Number of 
bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Limit Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load5','Node CPU Load 5m Average','Node CPU 5m load average','node_load5{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 5m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:26:07.000','2019-05-15 08:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_client_connectednativeclients','Number of Client Connections','Number of clients connected to this nodes native protocol server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_client_connectednativeclients)','Connection','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-07 11:59:04.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_activetasks','Number of tasks being actively worked on','Number of tasks being actively worked on','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_activetasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize 
$value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cloudwatch_requests_count','API requests made to CloudWatch','API requests made to CloudWatch','sum by (xm_clst_id, namespace, action) (rate(cloudwatch_requests_total{{filter}}[10m]))','Request','AWS/Usage',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.namespace}} CloudWatch API Call Volume:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_out','Bytes Sent Out on All Network Interfaces (KiB)','The number of bytes sent out on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_out_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_in','Bytes Received on All Network Interfaces (KiB)','The number of bytes received on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_in_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_count','Namespace Pod Count','Pod count by namesapce','count (sum (container_last_seen{{filter}}) by (xm_clst_id, xm_namespace, xm_pod_id)) by (xm_clst_id, xm_namespace)','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Pod Counts:{{humanize $value}}|{threshold}.','2019-08-22 16:53:32.000','2019-08-23 01:06:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage','Node Filesystem Usage (%)','NODE Filesystem Usage','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_available','Node Memory Available (GiB)','Memory information field MemAvailable_bytes / 1073741824','node_memory_MemAvailable_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Avail Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_total','Node Memory Total (GiB)','Memory information field MemTotal_bytes 
/ 1073741824','node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive','Node Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:07:46.000','2019-05-31 17:45:22.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit','Node Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:09:05.000','2019-05-31 17:46:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_allocated_count','Cluster Pod Allocated Count','Cluster Pod Allocated Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_allocatable_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Allocated Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_desired_count','Cluster Pod Desired Count','Cluster pod desired count by controller','sum by (xm_clst_id) (imxc_kubernetes_controller_replicas{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Desired Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 02:26:55.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_commands_total','Number of Commands Executed','The number of times each XXX command has been executed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, command) (rate(mysql_global_status_commands_total[1m]) > 0)','Request','MySQL','command',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Commands Executed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-12 08:20:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_running','Number of Threads Running','The number of threads that are not sleeping','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_running)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_dbname_state','Count by dbname and state in pg','count by dbname and state in pg','sum by (xm_clst_id, xm_namespace, 
xm_node_id, instance, state) (pg_stat_activity_count)','Connection','PostgreSQL','state',true,false,'count by dbname and state in pg','2020-01-30 06:10:54.000','2020-01-31 11:33:41.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_alerts_received_count','Namespace alerts received count','Alert count by namespace','sum by (xm_clst_id, xm_namespace, level) (floor(increase(imxc_alerts_received_count_total{status=''firing'', {filter}}[10m])))','Alert','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Alert Count:{{humanize $value}}|{threshold}.','2019-08-23 04:43:29.000','2019-08-23 04:43:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_reads_count_device','Node Disk Reads Count per Device (IOPS)','Node Disk Reads Count per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_reads_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Reads Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency','Node Disk Read Latency (ms)','Node Disk Read Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-05-20 10:59:07.000','2019-05-31 17:46:54.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency_device','Node Disk Write Latency per Device (ms)','Node Disk Write Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes','Node Disk Write Bytes (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size_device','Node Filesystem Available Size per Device (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size_device','Node Filesystem Free Size per Device (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size_device','Node Filesystem Total Size per Device (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_free','Node Swap Memory Free (GiB)','Memory information field SwapFree_bytes / 1073741824','node_memory_SwapFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_total','Node Swap Memory Total (GiB)','Memory information field SwapTotal_bytes / 1073741824','node_memory_SwapTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_up','PostgreSQL Up Count','Whether the last scrape of metrics from PostgreSQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (pg_up)','Instance','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Instance Count:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests','Number of Writes to Buffer Pool','The number of writes done to the InnoDB buffer pool','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_write_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Writes to Buffer Pool Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests','Number of Logical Read Requests','The number of logical 
read requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_read_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Logical Read Requests Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_read','Amount of Data Read','The amount of data read since the server was started (in bytes)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_data_read[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Read Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_os_log_written','Number of Bytes Written to Redo Log','The number of bytes written to the InnoDB redo log files','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_os_log_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Written to Redo Log Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_written','Amount of Data Written','The amount of data written so far, in bytes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(mysql_global_status_innodb_data_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Written Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pod','Container Memory Request/Limits vs Used by Pod','container_memory_sum_by_pod','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,true,false,'Container memory sum by pod (limit, request, used)','2020-07-22 21:44:33.000','2020-07-22 21:44:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_cache_hit_ratio','Buffer Cache Hit Ratio','Buffer Cache Hit Ratio','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ( +(1 - increase(mysql_global_status_innodb_buffer_pool_reads [1h]) / increase(mysql_global_status_innodb_buffer_pool_read_requests [1h])) * 100)','Block','MySQL',NULL,true,false,'.','2019-12-05 07:47:50.000','2019-12-13 01:17:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_cluster','Container CPU Request/Limits vs Used by Cluster','Container cpu sum by cluster (capacity, limit, request, usage)','sum by(xm_clst_id, data_type) ( 
+label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} *0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})*0.001, "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})*0.001, "data_type", "request", "" , "") or +label_replace(sum by(xm_clst_id)(rate(container_cpu_usage_seconds_total{{filter}}[1m])), "data_type", "used", "" , ""))','CPU','Cluster',NULL,true,false,'Container cpu sum by cluster','2020-07-22 17:49:53.000','2020-07-22 17:49:53.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size','Node Filesystem Total Size (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size','Node Filesystem Free Size (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('container_cpu_sum_by_pod','Container CPU Request/Limits vs Used by Pod','Container cpu sum by pod (capacity, limit, request, usage)','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type)( +label_replace (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "", "") or +label_replace (imxc_kubernetes_container_resource_limit_cpu{{filter}}*0.001, "data_type", "limit", "", "") or +label_replace (imxc_kubernetes_container_resource_request_cpu{{filter}}*0.001, "data_type", "request", "", "") +)','CPU','Pod',NULL,true,false,'Container cpu sum by Pod','2020-07-22 21:37:45.000','2020-07-22 21:37:45.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_lockmode','Count_by_lockmode','Count by lockmode','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, mode) (pg_locks_count)','Lock','PostgreSQL','mode',true,false,'Count by lockmode','2020-01-30 07:06:13.000','2020-01-30 07:06:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits','Number of Row Locks ','The number of row locks currently being waited for by operations on InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_innodb_row_lock_current_waits)','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_capacity','cluster_memory_capacity 
(GiB)','imxc_kubernetes_node_resource_capacity_memory','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Capacity:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:46:58.000','2020-05-27 09:05:56.427'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_free','Host system Filesystem free','Host File system free','sum by (instance) (node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Free Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:29.025','2020-03-23 04:08:29.025'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total','Host system Filesystem total','Host File system total','sum by (instance) (node_filesystem_size_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Total Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:27.634','2020-03-23 04:08:27.634'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_used','Host Swap Memory Used','Host Swap Used','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Used Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:26.169','2020-03-23 04:08:26.169'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes_device','Node Disk Read Bytes per Device (KiB)','The total number 
of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes','Node Disk Read Bytes (KiB)','The total number of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_rollback','Number of Transactions Rolled Back','Number of transactions in this database that have been rolled back','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_xact_rollback[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Rollback Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_commit','Number of Transactions Committed','Number of transactions in this database that have been committed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_xact_commit[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Commit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_ops_total','Number of Rows Operated','The number of rows operated in InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, operation) (rate(mysql_global_status_innodb_row_ops_total[1m]))','Row','MySQL','operation',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Rows Operated Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_immediate','Number of Table Lock Immediate','The number of times that a request for a table lock could be granted immediately','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_immediate[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Immediate Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_count','Local range scan count (count/s)','Local range scan count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) 
(rate(cassandra_keyspace_rangelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_waited','Number of Table Lock Waited','The number of times that a request for a table lock could not be granted immediately and a wait was needed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_waited[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Waited Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_time','Time Spent Reading Data File Blocks (ms)','Time spent reading data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blk_read_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_write_time','Time Spent Writing Data File Blocks (ms)','Time spent writing data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_blk_write_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Write Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_read','Number of Disk Blocks Read','Number of disk blocks read in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_read[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_hit','Number of Block Cache Hit','Number of times disk blocks were found already in the buffer cache','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_hit[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Hit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_activity_count','Number of Client Connections','number of connections in this state','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, state) (pg_stat_activity_count{{filter}})','Connection','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Connection Counts:{{humanize 
$value}}|{threshold}.','2019-08-27 15:49:21.000','2019-11-18 04:16:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_fetched','Number of Rows Fetched','Number of rows fetched by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_fetched[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Fetched Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_inserted','Number of Rows Inserted','Number of rows inserted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_inserted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Inserted Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_latency','Local range scan latency (ms)','Local range scan latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_rangelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_size','Size used by commit log segments (KiB/s)','Current size, in bytes, used by all the commit log segments / 1024','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_totalcommitlogsize{{filter}}[1m])) / 1024','Log','Cassandra',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Volume:{{humanize $value}}KiB/s|{threshold}KiB/s.','2019-10-02 10:17:01.000','2019-11-05 08:07:03.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_messages','Number of commit log messages written (count/s)','Total number of commit log messages written','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_completedtasks[1m]))','Log','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Message per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_count','Number of client requests (count/s)','Number of client requests by request type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_latency_seconds_count{{filter}}[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Client Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:04:25.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_active','Node Memory Active (GiB)','Memory information field Active_bytes in GiB','node_memory_Active_bytes{xm_entity_type=''Node'', {filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Active Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_returned','Number of Rows Returned','Number of rows returned by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_returned[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Returned Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_write_count','Local write count (count/s)','Local write count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_writelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_cluster','Container Memory Request/Limits vs Used by Cluster','Container memory sum by cluster','sum by (xm_clst_id, 
data_type)( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity", "" , "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "", "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "", "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Cluster',NULL,true,false,'Container memory sum by cluster','2020-07-22 21:23:15.000','2020-07-22 21:23:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_capacity','node_memory_capacity (GiB)','node memory capacity in GiB','imxc_kubernetes_node_resource_capacity_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:46:58.000','2019-08-23 08:46:58.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_request_cores','cluster_cpu_request_cores','cluster_cpu_request_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_request_cores','node_cpu_request_cores','node_cpu_request_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_limit_cores','cluster_cpu_limit_cores','cluster_cpu_limit_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_limit_cores','node_cpu_limit_cores','node_cpu_limit_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_unavailables_count','Number of unavailable exceptions encountered','Number of unavailable exceptions encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_unavailables_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Unavailable Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_up','Cassandra Up Count','Whether the last scrape of metrics from Cassandra was able to connect to the server','count by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_bufferpool_size{{filter}})','Instance','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Instances:{{humanize 
$value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 17:01:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_up','MongoDB Up Count','The number of seconds that the current MongoDB process has been active','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_instance_uptime_seconds[1m]))','Instance','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Up Count Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_current_queue','Number of Operations Waiting','The number of operations that are currently queued and waiting for the read or write lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_current_queue)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Waiting Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_client','Number of Active Client','The number of the active client connections performing read or write operations','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_client)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Active Client Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_metrics_document_total','Number of Documents Processed','The total number of documents processed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_metrics_document_total[1m]))','Row','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Documents Processed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused','Total disk space used (GiB)','Total disk space used belonging to this keyspace / 1073741824','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_totaldiskspaceused {{filter}}) / 1073741824','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Disk Space:{{humanize $value}}GiB|{threshold}GiB.','2019-10-02 10:17:01.000','2019-11-07 01:14:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_latency','Local read latency (ms)','Local read latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_totalblockedtasks','Number of tasks that were blocked (count/s)','Number of tasks that were blocked due to queue saturation in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_totalblockedtasks_count[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Blocked Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_completedtasks','Number of tasks completed (count/s)','Number of tasks completed in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_completedtasks{{filter}}[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Pending Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-05 08:08:57.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_memory','Amount of Memory, in MebiByte','The amount of memory, in mebibyte (MiB), currently used by the database process','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_memory)','Memory','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Memory:{{humanize $value}}MiB|{threshold}MiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_resource_utilization','Resource Usage','Gauge metric with resource utilization','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) (oracledb_resource_current_utilization)','Resource','OracleDB','resource_name',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Resource Usage:{{humanize $value}}%|{threshold}%.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_timeouts_count','Number of timeouts encountered','Number of timeouts encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_timeouts_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Timeout Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_network_bytes_total','Amount of Network Traffic','The number of bytes that reflects the amount of network traffic','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_network_bytes_total[1m]))','Network','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Network Traffic Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_op_counters_total','Number 
of Operations','The total number of operations since the mongod instance last started','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (rate(mongodb_op_counters_total[1m]))','Request','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits','Number of Waits for Row Locks','The number of times operations on InnoDB tables had to wait for a row lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_row_lock_waits[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Waits for Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_execute_count','Execute Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_execute_count[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Execute Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_commits','User Commits','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(oracledb_activity_user_commits[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_parse_count','Parse Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_parse_count_total[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Parse Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_rollbacks','User Rollbacks','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_user_rollbacks[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Rollback:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_writes','Pod Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 
05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage','Pod Memory Usage (%)','Pod Memory Usage Compared to Limit','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024)','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Utillization:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage_bytes','Pod Memory Used (GiB)','Current memory usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_cache_hit_ratio','Buffer Cache Hit Ratio (%)','Number of Block Cache Hit / (Number of Block Cache Hit & Blocks Reads) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (increase(pg_stat_database_blks_hit[1h]) / (increase(pg_stat_database_blks_read[1h]) + increase(pg_stat_database_blks_hit[1h])) * 100)','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL 
Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-08-27 15:49:21.000','2019-12-13 01:33:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_other','Wait-Time - Other','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_other[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Other:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_configuration','Wait-Time - Configuration','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_configuration[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Configuration{{humanize $value}}|{threshold}','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_commit','Wait-Time - Commit','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_commit[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_scheduler','Wait-Time - Scheduler','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_scheduler[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Scheduler:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_system_io','Wait-Time - System I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_system_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - System I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_user_io','Wait-Time - User I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_user_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - User I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_network','Wait-Time - Network','Generic counter metric from 
v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_network[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Network:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_blocked_clients','Blocked Clients','Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_blocked_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Blocked Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connected_clients','Connected Clients','Number of client connections (excluding connections from replicas)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_connected_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Connected Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connections_received','Received Connections','Total number of connections accepted by the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_connections_received_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Received Connections:{{humanize 
$value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_rejected_connections','Rejected Connections','Number of connections rejected because of maxclients limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_rejected_connections_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Rejected Connections:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_up','Redis Up Count','Whether the Redis server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_up)','Instance','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Up Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_total','Call Count / Command','Total number of calls per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_total[1m]))','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Call Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_processed','Processed Commands','Total number of commands processed by the server','sum by (xm_clst_id, 
xm_namespace, xm_node_id, instance) (rate(redis_commands_processed_total[1m]))','Request','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Processed Commands:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_key_hit_raito','Redis key hit ratio','redis key hit ratio','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_keyspace_hits_total [1m]), "data_type", "hits", "" , "") or +label_replace(rate(redis_keyspace_misses_total [1m]), "data_type", "misses", "" , "") )','Keyspace','Redis','data_type',true,false,'redis key hit ratio','2020-01-29 02:28:03.000','2020-02-13 00:46:27.568'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_net_byte_total','Network byte','Network byte','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_net_input_bytes_total [1m]), "data_type", "input", "", "") or +label_replace(rate(redis_net_output_bytes_total [1m]), "data_type", "output", "", ""))','Network','PostgreSQL','data_type',true,false,'Network byte','2020-01-30 07:22:12.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_cache','Pod Memory Cache (GiB)','Number of bytes of page cache memory / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_cache{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Cache Memory:{{humanize
$value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_swap','Pod Memory Swap (GiB)','Pod swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_swap{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_total','Oracledb wait time total','oracledb wait time total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_wait_time_scheduler[1m]), "data_type", "scheduler", "", "") or +label_replace(rate(oracledb_wait_time_commit[1m]), "data_type", "commit", "", "") or +label_replace(rate(oracledb_wait_time_network[1m]), "data_type", "network", "", "") or +label_replace(rate(oracledb_wait_time_concurrency[1m]), "data_type", "concurrency", "", "") or +label_replace(rate(oracledb_wait_time_configuration[1m]), "data_type", "configuration", "", "") or +label_replace(rate(oracledb_wait_time_user_io[1m]), "data_type", "user_io", "", "") or +label_replace(rate(oracledb_wait_time_system_io[1m]), "data_type", "system_io", "", "") or +label_replace(rate(oracledb_wait_time_other[1m]), "data_type", "other", "", ""))','Wait','OracleDB','data_type',true,false,'oracledb wait time total','2020-01-29 11:03:20.000','2020-02-13 01:08:01.629'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES
('oracledb_activity_count','Oracledb activity count','oracledb activity count','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_execute_count [1m]), "data_type", "excutecount", "", "") or +label_replace(rate(oracledb_activity_parse_count_total[1m]), "data_type", "parse_count", "", "") )','Request','OracleDB','data_type',true,false,'oracledb activity count','2020-01-29 10:40:58.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_transaction','Oracledb transaction','oracledb transaction','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_user_rollbacks[1m]), "data_type", "rollbacks", "", "") or +label_replace(rate(oracledb_activity_user_commits[1m]), "data_type", "commits", "", ""))','Request','OracleDB','data_type',true,false,'oracledb transaction','2020-01-29 11:20:47.000','2020-02-13 01:26:28.558'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_cpu_usage','Redis cpu usage','redis cpu usage','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_used_cpu_sys [1m]), "data_type", "system", "", "") or +label_replace(rate(redis_used_cpu_user [1m]), "data_type", "user", "", "") )','CPU','Redis','data_type',true,false,'redis cpu usage','2020-01-29 01:56:58.000','2020-02-12 04:47:21.228'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_total_load','host total load','host total load','sum by (instance, data_type) ( +label_replace(node_load1 {{filter}}, "data_type", "load 1", "", "") or +label_replace(node_load5 
{{filter}}, "data_type", "load 5", "", "") or +label_replace(node_load15 {{filter}}, "data_type", "load15", "", "") )','CPU','Host',NULL,false,false,'host total load','2020-04-01 08:10:26.588','2020-04-03 01:23:47.665'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys_children','System CPU Used Background','System CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_hits','Keyspace Hits','Number of successful lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_hits_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Hits:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_misses','Keyspace Misses','Number of failed lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_misses_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Misses:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys','DB Keys Count','Total number of keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB Keys Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_expired_keys','Expired Keys','Total number of key expiration events','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_expired_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Expired Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_evicted_keys','Evicted Keys','Number of evicted keys due to maxmemory limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_evicted_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Evicted Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys_expiring','DB Keys Count Expiring','Total number of expiring keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys_expiring)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB 
Keys Count Expiring:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_duration_seconds','Duration Seconds / Command','Total duration seconds per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_duration_seconds_total[1m]) * 1000)','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Duration Seconds:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-29 01:42:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_total','Redis memory total','redis memory total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(redis_allocator_active_bytes / 1048576, "data_type", "active", "" , "") or +label_replace(redis_memory_used_bytes / 1048576, "data_type", "used", "" , "") or +label_replace(redis_allocator_allocated_bytes / 1048576, "data_type", "allocated", "" , "") or +label_replace(redis_allocator_resident_bytes / 1048576, "data_type", "resident", "" , "") )','Memory','Redis','data_type',true,false,'redis memory total','2020-01-29 02:08:28.000','2020-02-13 00:45:28.475'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('count_by_connection_type','Count by connection type','count by connection type','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_connections_received_total [1m]), "data_type", "received connections", "", "") or +label_replace(rate(redis_rejected_connections_total [1m]), "data_type", "rejected 
connections", "", "") or +label_replace(redis_connected_clients, "data_type", "connected clients", "", "") or +label_replace(redis_blocked_clients, "data_type", "blocked clients", "", "") )','Connection','Redis','data_type',true,false,'count by connection type','2020-01-29 00:49:09.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_count','Number of row by stat','Number of row by stat','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_tup_deleted[1m]), "data_type", "deleted", "", "") or +label_replace(rate(pg_stat_database_tup_updated[1m]), "data_type", "updated", "", "") or +label_replace(rate(pg_stat_database_tup_inserted[1m]), "data_type", "inserted", "", "") or +label_replace(rate(pg_stat_database_tup_returned[1m]), "data_type", "returned", "", "") or +label_replace(rate(pg_stat_database_tup_fetched[1m]), "data_type", "fetched", "", "") )','Row','PostgreSQL','data_type',true,true,'Number of row by stat','2019-10-28 07:29:26.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_write_time','Read/Write spent time by file blocks','Read/Write spent time by file blocks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_blk_read_time [1m]), "data_type", "read", "", "") or +label_replace(rate(pg_stat_database_blk_write_time [1m]), "data_type", "write", "", ""))','Block','PostgreSQL','data_type',true,false,'Read/Write spent time by file blocks','2019-10-28 10:56:48.000','2020-02-13 01:06:46.680'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_resident_size','Resident Memory (MiB)','The total amount of resident memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_resident_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Resident Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_used_size','Used Memory (MiB)','Total number of bytes allocated by Redis using its allocator','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_memory_used_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Used Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_anormal_count','Number of anormal request','Number of anormal request ','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, anormal_type) +(label_replace(rate(cassandra_clientrequest_unavailables_count[1m]), "anormal_type", "unavailables", "", "") or +label_replace(rate(cassandra_clientrequest_timeouts_count[1m]), "anormal_type", "timeouts", "", "") or +label_replace(rate(cassandra_clientrequest_failures_count[1m]), "anormal_type", "failures", "", ""))','Request','Cassandra','anormal_type',true,false,'Number of anormal request ','2019-10-28 02:09:45.000','2020-02-13 01:16:24.862'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog','Commitlog count and size','Commitlog count and size','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(rate(cassandra_commitlog_completedtasks {{filter}}[1m]), "data_type", "log_count", "", "") or +label_replace(rate(cassandra_commitlog_totalcommitlogsize {{filter}}[1m]) / 1048576, "data_type", "log_size", "", ""))','Log','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-24 10:44:47.000','2020-02-13 01:16:24.864'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_threads_total','Number of Threads','Number of Threads','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_threads_running, "data_type", "active", "", "") or +label_replace(mysql_global_status_threads_connected, "data_type", "connected", "", "") or +label_replace(rate(mysql_global_status_connections [1m]), "data_type", "connection attempts[1m]", "", "") )','Thread','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-05 06:04:21.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_read_write_count','Local read write count','Local read write count','sum by(xm_clst_id, xm_namespace, xm_node_id, instance, type) +(label_replace( rate(cassandra_keyspace_readlatency_seconds_count [1m]), "type", "read", "", "") or +label_replace( 
rate(cassandra_keyspace_writelatency_seconds_count [1m]), "type", "write", "", ""))','Disk','Cassandra','type',true,true,'Local read write count','2019-10-24 05:18:50.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_lock_total','Oracledb lock total','oracledb lock total','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) +(oracledb_resource_current_utilization{resource_name =~''.+_locks''})','Resource','OracleDB','resource_name',true,false,'oracledb lock total','2020-01-29 11:17:01.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_per_sec_by_api','Service HTTP Requests Count by API (per Second)','the number of HTTP requests counts per second by API','(sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value)','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_core_count','Host CPU Core Count','Host_cpu_capacity_cores','count without(cpu, mode) (node_cpu_seconds_total{{filter}})','CPU','Host',NULL,true,false,'None','2020-03-23 
04:08:05.290','2020-03-23 04:08:05.290'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load5','Host CPU Load 5m Average','Host CPU 5m load average','node_load5{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 5m Load Average:{{humanize $value}}%|{threshold}$.','2020-03-23 04:08:11.655','2020-03-23 04:08:11.655'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_cluster','Pod Phase Count by Cluster','pod phase count by cluster','count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))','Cluster','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_network_io_byte','host network io byte','host network io byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )','Network','Host',NULL,false,false,'host network io byte','2020-03-24 05:48:31.359','2020-03-24 05:48:31.359'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_contextswitch_and_filedescriptor','host contextswitch and filedescriptor','host 
contextswitch and filedescriptor','sum by (data_type, instance) ( +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "Context switch", "", "") or +label_replace(node_filefd_allocated {{filter}}, "data_type", "File descriptor", "", "") )','OS','Host',NULL,false,false,'host contextswitch and filedescriptor','2020-03-24 09:05:51.828','2020-03-24 09:08:06.867'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_usage','Host Swap Memory Usage (%)','Host Swap Memory Usage','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}} / node_memory_SwapTotal_bytes{{filter}} +','Memory','Host',NULL,true,false,'None','2020-03-26 06:39:21.333','2020-03-26 06:39:21.333'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_boot_time','Host Boot time','Host Boot time','node_boot_time_seconds{{filter}}','CPU','Host',NULL,true,false,'None','2020-03-26 08:03:46.189','2020-03-26 08:03:46.189'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_latency','Host read Disk latency','Host disk read latency','sum by (instance) (rate(node_disk_reads_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_read_time_seconds_total{{filter}}[1m])/rate(node_disk_reads_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Read Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:34.001','2020-03-23 04:08:34.001'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('host_disk_write_latency','Host write Disk latency','Host disk write latency','sum by (instance) (rate(node_disk_writes_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_write_time_seconds_total{{filter}}[1m])/rate(node_disk_writes_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Write Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:35.823','2020-03-23 04:08:35.823'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_usage','Host Memory Usage (%)','Host Memory Usage ','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Usage:{{humanize $value}}%|{threshold}%.','2020-03-26 06:36:47.931','2020-03-26 06:36:47.931'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_total','Host Memory Total (GiB)','Memory information field MemTotal_bytes','node_memory_MemTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:16.897','2020-03-23 04:08:16.897'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_bytes_received_sent','Bytes Received & Sent in MySQL','Bytes Received & Sent in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( 
+label_replace(rate(mysql_global_status_bytes_received [1m]), "data_type", "received", "", "") or +label_replace(rate(mysql_global_status_bytes_sent [1m]), "data_type", "sent", "", ""))','Network','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}|{threshold}.','2019-12-05 07:58:11.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_95th','Service HTTP 95% Elapsed Time (ms)','the maximum time taken to servce the 95% of HTTP requests','histogram_quantile(0.95, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 95th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_99th','Service HTTP 99% Elapsed Time (ms)','the maximum time taken to servce the 99% of HTTP requests','histogram_quantile(0.99, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 99th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_error_rate','Service Pod HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Pod Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-11-07 07:52:24.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_90th','Service HTTP 90% Elapsed Time (ms)','the maximum time taken to servce the 90% of HTTP requests','histogram_quantile(0.90, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 90th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total_by_mountpoint','host filesystem size by mountpoint','host filesystem size by mountpoint','sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))','Filesystem','Host',NULL,false,false,'host filesystem size by mountpoint','2020-03-30 04:01:45.322','2020-03-30 05:16:32.252'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_timeline_count','Namespace timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id, xm_namespace, level)','Timeline','Namespace',NULL,false,false,'None','2020-04-08 06:21:21.392','2020-04-08 06:21:21.392'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_timeline_count','Cluster timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id,level)','Timeline','Cluster',NULL,false,false,'None','2020-04-08 06:19:32.792','2020-04-28 
08:07:47.786'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_transmit','Cluster Network Transmit','Cluster Network Transmit','sum by (xm_clst_id) (rate(node_network_transmit_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Transmit','2020-04-28 08:10:21.070','2020-04-28 08:29:18.491'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_receive','Cluster Network Receive','Cluster Network Receive','sum by (xm_clst_id) (rate(node_network_receive_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Receive','2020-04-28 08:07:26.294','2020-04-28 08:29:18.486'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_running_count','Namespace Pod Running Count','Running pod count by namespace','count by (xm_clst_id, xm_namespace) (sum by (xm_clst_id, xm_node_id, xm_namespace, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Namespace',NULL,false,false,'None','2020-05-21 01:18:06.016','2020-05-21 01:18:06.016'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_request','Pod CPU Request','Pod CPU Request','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_io_byte','Node Network IO byte','Node Network IO byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", "") )','Network','Node',NULL,false,false,'Node Network IO byte','2020-05-21 07:32:03.535','2020-05-21 07:32:03.535'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_request','pod_memory_request (Gib)','Total container memory request in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_node','Container memory sum by node','Container memory sum by node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity" , "", "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_working_set_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Node',NULL,false,false,'Container memory sum by node','2020-05-28 09:36:44.000','2020-06-09 01:38:10.694'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_context_switches','Node Context Switches','Node Context Switches','rate(node_context_switches_total {{filter}}[1m])','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:05.521'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_write_byte','Node disk read and write bytes','Node disk read and write bytes','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]), "data_type", "Read" , "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]), "data_type", "Write", "" , "") +)','Disk','Node',NULL,false,false,'Node disk read and write bytes','2020-05-28 13:02:44.729','2020-05-28 13:04:35.126'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_total','Host Swap Memory Total','Host Swap Total','node_memory_SwapTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Swap Memory Size:{{humanize 
$value}}GiB|{threshold}GiB.','2020-03-23 04:08:23.130','2020-03-23 04:08:23.130'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_iowait','Host CPU iowait','Host CPU iowait','avg by (instance) (rate(node_cpu_seconds_total{mode=''iowait'',{filter}}[1m])) * 100','CPU','Host',NULL,false,false,'Host:{{$labels.instance}} CPU IO wait:{{humanize $value}}|{threshold}.','2020-03-26 08:03:51.307','2020-03-26 08:03:51.307'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_filefd_allocated','Host statistics Filesystem allocated.','Host File descriptor statistics: allocated.','sum by (instance) (node_filefd_allocated{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem allocated:{{humanize $value}}|{threshold}.','2020-03-23 04:08:31.970','2020-03-23 04:08:31.970'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg','Service HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests','sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) == 0 or +sum (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) +/ sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace)','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Requests Time Avg:{{humanize 
$value}}ms|{threshold}ms.','2019-10-15 09:37:44.000','2020-03-09 06:42:14.172'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate_by_api','Service HTTP Requests Error Rate by API','the number of HTTP error counts by API / the number of HTTP requests counts by API','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) ==0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg_by_api','Service HTTP Average Elapsed Time by API (ms)','the average time taken to serve the HTTP requests by API for a service','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.500'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_used','Node CPU Used (Cores)','Node CPU Used (Cores)','(100 - (avg by (xm_clst_id, xm_node_id) (clamp_max(rate(node_cpu_seconds_total{name="node-exporter", mode="idle", xm_entity_type="Node", {filter}}[1m]),1.0)) * 100)) * sum by(xm_clst_id, xm_node_id)(imxc_kubernetes_node_resource_capacity_cpu{{filter}}) / 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:35.939'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_iowait','Node CPU I/O Wait','Node CPU I/O Wait','avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{name="node-exporter", mode="iowait", xm_entity_type="Node" , {filter}}[1m])) * 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:20.633'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_node','Container cpu sum by Node','Container cpu sum by Node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} * 0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001), "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001), "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Node',NULL,false,false,'Container cpu sum by Node','2020-05-28 08:06:35.736','2020-06-09 01:46:12.446'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops_per_device','Node Disk IOPs per device','Node Disk I/O Operations Per Second (per device)','sum by (xm_clst_id, xm_node_id, device) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node','device',false,false,'None','2020-06-10 05:56:05.311','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops','Node Disk IOPs','Node Disk I/O Operations Per Second','sum by (xm_clst_id, xm_node_id) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-06-10 05:54:01.309','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_iops','Host Disk IOPs','Host Disk IOPs','sum by (instance) ((rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or (rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))','Disk','Node',NULL,false,false,'Host Disk IOPs','2020-06-10 07:26:28.895','2020-06-10 07:26:28.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_limit','Pod CPU Limit','Pod CPU Limit','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_limit','pod_memory_limit (Gib)','Total container memory limit in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage_bytes','Container Memory Used (GiB)','Current memory usage in GiB, this includes all memory regardless of when it was accessed','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_used','Node Memory Used (GIB)','Node Memory Used (GIB)','((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Used:{{humanize $value}}GiB|{threshold}GiB.','2020-05-21 01:18:06.000','2020-06-04 11:11:11.000'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user','User CPU Used','User CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user[1m]))','CPU','Redis',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-05-29 09:37:22.273'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_container','Container cpu sum by container','container cpu sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_request_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{xm_cont_name!=''POD'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_pods','Container cpu sum by pod','Container cpu sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , 
""))','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pods','Container memory sum by pod','Container memory sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_container','Container memory sum by container','Container memory sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_limit_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{xm_cont_name!=''POD'',{filter}}, "data_type", "used", "" , ""))','Memory','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_disk_read_write_byte','Container disk read and write bytes','Container disk read and write bytes','sum by(xm_clst_id, xm_pod_id, xm_cont_name, data_type) 
(label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_disk_read_write_byte','Pod disk read and write bytes','Pod disk read and write bytes','sum by(xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_io_byte','Container Network IO byte','Container Network IO byte','sum by (xm_clst_id, xm_pod_id, xm_cont_name, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_io_byte','Pod Network IO byte','Pod Network IO byte','sum by (xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", 
"", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load1','Node CPU Load 1m Average','Node CPU 1m load average','node_load1{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 1m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:22:49.000','2019-05-15 08:22:49.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_open_file_descriptor','Node File Descriptor','Node File Descriptor','sum by(xm_clst_id, xm_node_id)(node_filefd_allocated {{filter}})','Filesystem','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} File Descriptor:{{humanize $value}}|{threshold}.','2020-05-21 01:18:06.000','2020-05-29 09:37:51.101'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_node_count','Node Type Sparselog Count','Node-type sparse log count by xm_clst_id, xm_node_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_node_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Node",{filter}}[1m])))','SparseLog','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_cache','Container Memory Cache (GiB)','Number of bytes of page cache memory / 
1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_cache{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load15','Host CPU Load 15m Average','Host CPU 15m load average','node_load15{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 15m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:13.337','2020-03-23 04:08:13.337'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes_device','Node Disk Write Bytes per Device (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency','Node Disk Write Latency (ms)','Node Disk Write Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Latency:{{humanize 
$value}}ms|{threshold}ms.','2019-05-20 11:00:56.000','2019-05-31 17:47:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_writes_count_device','Node Disk Writes Count per Device (IOPS)','Node Disk Writes Counts per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_writes_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Writes Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_throttled_rate','Container CPU Throttled Rate','container throttled rate','sum by(xm_clst_id, xm_cont_id) (rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="",{filter}}[1m]))','Cluster','Container',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_total_count','Node Pod Total Count','Node Pod Total Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_service_http_requests_per_sec','Service HTTP Requests Count (per Second)','the number of HTTP requests counts per second','((sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))/ on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Http Requests/Second:{{humanize $value}}|{threshold}.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_per_sec','Service Pod HTTP Requests Count (per Second)','the number of HTTP requets counts per second for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod Http Requests/Seconds:{{humanize $value}}|{threshold}.','2019-11-07 07:51:11.000','2020-03-09 06:34:19.353'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_max_usage_bytes','Container Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_max_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 
1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_receive','Container Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_50th','Service HTTP 50% Elapsed Time (ms)','the maximum time taken to servce the 50% of HTTP requests','histogram_quantile(0.50, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 50th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_errors_count','Service Error Count','service error 
count','sum by(xm_clst_id, xm_namespace, xm_service_name, statuscode ) (imxc_service_errors_count{statuscode!="200",{filter}}) OR on() vector(0)','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Error Count:{{humanize $value}}|{threshold}.','2020-08-21 16:45:00.000','2020-08-21 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_used','Host Memory Used (GiB)','Memory information field MemUsed_bytes','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:21.399','2020-03-23 04:08:21.399'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_all_state','Workload Count All State','workload total count regardless of pod state','count by(xm_clst_id, controller_kind) (imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_running_pod','Workload Count Running Pod','workload count of Running state pod','sum by(xm_clst_id,controller_kind ) 
(imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit_device','Node Network Transmit per Device(KiB)','Network device statistic transmit_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive_device','Node Network Receive per Device(KiB)','Network device statistic receive_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_time_avg','Service Pod HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests for pod','sum by 
(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod http Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2019-11-07 07:51:46.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_system','Container CPU System (%)','Container CPU Usage (System)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_usage','Container CPU Usage (%)','Container CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-05-15 
01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_namespace','Pod Phase Count by Namespace','pod phase count by cluster, namespace','count by(xm_clst_id, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','Namespace','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} Pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_limit_bytes','Container Filesystem Limit Bytes (GiB)','Number of bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage','Container Memory Usage (%)','Container memory usage compared to limit if limit is non-zero or 1GiB if limit is zero','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'', xm_cont_name!=''POD'', {filter}} / (container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} > 0) * 100) or sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) 
(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024 / 1024 / 1024 * 100)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_swap','Container Memory Swap (GiB)','Container swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_swap{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_transmit','Container Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('controller_pod_count','Controller Pod Count','Controller Pod Count','sum (imxc_kubernetes_controller_counts{{filter}}) by (xm_clst_id, xm_namespace, xm_entity_name, 
xm_entity_type)','Pod','Controller',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Controller Pod Counts:{{humanize $value}}|{threshold}.','2019-10-10 06:39:09.000','2019-10-10 06:39:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load1','Host CPU Load 1m Average','Host CPU 1m load average','node_load1{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 1m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:09.946','2020-03-23 04:08:09.946'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_usage','Host CPU Usage (%)','Host CPU Usage','100 - (avg by (instance)(clamp_max(rate(node_cpu_seconds_total{mode=''idle'',{filter}}[1m]),1.0)) * 100)','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:07.606','2020-03-23 04:08:07.606'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_cpuutilization','The percentage of allocated EC2 compute','The percentage of allocated EC2 compute units that are currently in use on the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_cpuutilization_average{{filter}})','CPU','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections','Number of Incoming Connections','The number of incoming 
connections from clients to the database server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (mongodb_connections{{filter}})','Connection','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Incoming Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-13 02:26:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_buffer_io','Block read / write','mysql buffer I/O summary','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_innodb_buffer_pool_write_requests, "data_type", "write", "", "") or +label_replace(mysql_global_status_innodb_buffer_pool_read_requests, "data_type", "read", "", "") )','Block','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Mysql Buffer IO:{{humanize $value}}|{threshold}.','2019-12-05 07:30:33.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_reads','Number of Reads Directly from Disk','The number of logical reads that InnoDB could not satisfy from the buffer pool, and had to read directly from disk','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_reads[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Reads Directly from Disk Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('mysql_global_status_connections','Number of Connection Attempts','The number of connection attempts (successful or not) to the MySQL server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_connections[1m]))','Connection','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Connection Attempts counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_status_locks','Number of Locks in MySQL','Number of Locks in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_row_lock_current_waits[1m]), "data_type", "rowlocks", "", "") or +label_replace(rate(mysql_global_status_innodb_row_lock_waits[1m]), "data_type", "waits for rowlocks", "", "") or +label_replace(rate(mysql_global_status_table_locks_immediate[1m]), "data_type", "tablelock immediate", "", "") or +label_replace(rate(mysql_global_status_table_locks_waited[1m]), "data_type", "tablelock waited", "", "") )','Lock','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Mysql Status Locks:{{humanize $value}}|{threshold}.','2019-12-05 08:39:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage_bytes','Container Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_writes','Container Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_sessions_value','Session Count','Gauge metric with count of sessions by status and type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, status_type) +(label_join(oracledb_sessions_value, "status_type", "-", "status", "type"))','Session','OracleDB','status_type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Session Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_bytes','Bytes Written to Temporary Files (KiB)','Total amount of data written to temporary files by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_bytes[1m])) / 1024','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File 
Write Size:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys','System CPU Used','System CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user_children','User CPU Used Background','User CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate','Service HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts','sum by(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / sum by +(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) 
(rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_cache_hit_ratio','Buffer Cache Hit Ratio (%)','(Number of Logical Read - Number of Reads Directly from Disk) / (Number of Logical Read) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ((increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) - increase(mysql_global_status_innodb_buffer_pool_reads[1m])) / increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) * 100)','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Buffer Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage','Pod Filesystem Usage (%)','Pod File System Usage: 100 * (Used Bytes / Limit Bytes)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} /((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_pod_cpu_request','Node Pod CPU Request','Node Pod CPU Request','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} Pod CPU Requests:{{humanize $value}}|{threshold}.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_cpu_usage','Node Pod CPU Usage (%)','Node Pod CPU Usage','sum by (xm_clst_id,xm_node_id) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod CPU Usage:{{humanize $value}}%|{threshold}%.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_usage_core','Container CPU Usage (Core)','Container CPU Usage (Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_system_core','Container CPU System (Core)','Container CPU Usage (System)(Core)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_user_core','Container CPU User (Core)','Container CPU Usage 
(User)(Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_service','pod info in service','pod info(state, node) in service','sum by (xm_clst_id, xm_namespace, xm_service_name,xm_node_id,node_status,xm_pod_id,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2020-12-22 16:05:00.000','2020-12-22 16:05:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_state','Service State Count Sum','service state sum by xm_service_name','sum by (xm_service_name,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2021-01-06 17:30:00.000','2021-01-06 17:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_workload_state','Workload State Count Sum','wokload state sum by owner_name','count by (owner_name, pod_state) (imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_workload','Pod info by workload type','pod info(state, node) by workload type (do filter param)','count by (xm_clst_id, xm_namespace, owner_name, xm_node_id, node_status, xm_pod_id, pod_state) 
(imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_up_state','Node State metric','Node State metric for up, down check','imxc_kubernetes_node_ready{{filter}}','State','Node',NULL,true,false,'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Down {threshold}.','2020-02-02 14:30:00.000','2020-02-02 14:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_reads_by_workload', 
'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, 
xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100))', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name,xm_entity_type) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by (xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 
'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by(xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +--Number of Pods not running +INSERT INTO public.metric_meta2 VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()); +--Number of Containers not running +INSERT INTO public.metric_meta2 VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", 
{filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()); +-- Containers Restart count +INSERT INTO public.metric_meta2 VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()); + +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_per_sec','Service Transaction Count (per Second)','Service Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Transaction Count (per Second)','2021-11-15 16:11:19.606','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_elapsed_time_avg','Service Pod Transaction Elapsed Time (avg)','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Average Elapsed Time','2021-11-15 16:09:34.233','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_jspd_txn_error_rate','Service Transaction Error Rate','Service Transaction Error Rate','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2022-02-15 14:33:00.118000','2022-02-15 15:40:17.640000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_per_sec','Service Pod Transaction Count (per sec)','The number of transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-02-15 17:59:39.450000','2022-02-15 17:59:39.450000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_elapsed_time_avg','Service Average Elapsed Time','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))','Request','Service',null,true,true,'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2021-11-15 16:09:34.233000','2021-11-15 16:12:21.335000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_error_count','Service Transaction Error Count','Service Transaction Error Count','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])))','Request','Service',NULL,true,true,'Service Transaction Error Count','2021-11-15 16:10:31.352','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_error_rate','Service Pod Transaction Error Rate','The number of transaction error rate for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.','2022-02-15 18:08:58.180000','2022-02-15 18:08:58.180000'); + +INSERT INTO 
metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_active_txn_per_sec','Service Active Transaction Count (per Second)','Service Active Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:51:45.946','2022-03-11 15:51:45.946'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_active_txn_per_sec','Service Pod Active Transaction Count (per sec)','The number of active transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:53:29.252','2022-03-11 15:53:29.252'); + + +INSERT INTO public.license_key (id, license_key, set_time, in_used, tenant_id) VALUES (nextval('hibernate_sequence'), 
'A46CB0A0870B60DD0EF554F092FB8490C647C4ACCF17177EB0028FEF1B677A1DC86C08219D3D357E55E87B653A9D2F044F9095576ED493CE5D1E180E8843A04BCFE94E500F85491D408CFC7397B82F00063415F4CF8756545B6ED1A38F07F91A7B6D9381B7FC433A5086CDD2D748527ECB42835677199F23F7C8E33A66E8138182DDD76BE4925FA4B1DFD96FD5578FE80C75E0E20D76877BF6FD570265D8E69CAC34795B982CF8D811669894886567E4F5F62E28990953401374B548787E35374BFF201D5C9AD062B326E72F9B1D7791A610DA1BDF1D4F829819BC537E06C8D54F95FB04F2DAC456698F605DE3BBD72E472FC79658C806B188988B053E1E4D96FFFFFF0312983D630FAD5E9160650653074248047030124045265319328119048121312221292096178141356403289033057286071001044254168244430392446457353385472238471183338511051434316333006127241420429465082200161165099271484261287306170426201314452131350327249112310323036187433166345114324280269098441154231174135226128298344425341164290424093450115453299282209144110060155055496368233391148510223372355438125122460232315097083390283180026090507303464176016343147301028053052418046214169100404193398101492126437150008449359062078276386196105011194373118107003376243188284337378334352432479501211364186021040035210237120336302073022394079272002081397132067383497202300181309396185361017436058208454167203412219275329234043427354024133409339470296204490485256467335056F5B2CABD122B376DAEA67944E1CCE6867DF9EB6504C78F817DF9EB6504C78F81BF1E615E6EC6242C9667BD675FC5FA39C6672FE2068E5D1431C6CD04429D07655865E293C1F77ED7A0D33F5556DA6CD3A8EC2774DB04F797CE4A29B0312F75E585D51D7B4DD227EA6BD5278CB9233040E7DD2B30A6D5119959D5B7EAC826D3DA0537EFB5A034A6A1C91A619F4E168F46A455B594C91F058E1E22C7EA2957EED7533D069C335C95B4FA2B53E71A800343EA7F16B05AFBA04635F1FBDE9C81709C27BA075C78FA26311ED3A4A5226EF47FC84C3024999406B47F2098B5983CC3CAF79F92332074B9872E429CBE8EF12D5092628E4D4A39CBDDFCAAB2E382229CF09A5B10243340C1A7A0C5CBC14C704FCE873571524A5B038F1781CD31A4D8E2C48E02E63A2746E668273BE9D63937B88D8C864CE439528EB13BDFAC3E52EE4B8CB75B4ED65A7C97B42E5DAEE3E41D2331B06FFFBA71BECD9B96AEEB969670FC3869CC59050FD6DFA3245719531410402
2250232266247291151DEFAULT_TENANT', now(), true, 'DEFAULT_TENANT'); +insert into public.license_key2 (id, license_key, set_time, cluster_id, license_used) values (nextval('hibernate_sequence'), 'D041F44269EAFF1AF7C37ACAA86B7D9CBED89547431E777B797220CF62FE5D6A27C66BEBEAB8F4C89EA5379009C90CDEBFFAE307B7AEB897DC4D8CEAB61654340BB746B0B46679A9FB4791C777BAEBA176308F6BEB1654CE43D4E80E6D0F80CEC00B1EC30E7DA4BB8D3159133EF98AEB50617107DB77BE94676E0D4AA04ADA3B11A66824DB89A60C52BC1AB92926F10189DBBA6210B31478F48CF87B5D754F1A7C6BED0D1637742179DBF7BE82B3B3357AEA82CFAAD9126E39C4E19BABCB1CBDDB816C86A8F7C476D963265720383B627800775B0C9116D67CE5CB7CFC71D0A8A36623965EBB18A5BE1816FB1FAAAEAC361D2ABBC7344EC0B6C61E0395115B13FFFFFF03DEF34E840F2ED2AC84AC44DF368362366124308470063002498494067338303241077065122260378200508377102354337080160182150254091118451110391059070094162363290186239455351194330333503046082379128006166220287276298120398066372099177432015458270176242025196335311342039022343475412085392206244005184417460227292375103433217376511140361223163316121467443014486278407389237024349111268136424371062035285300509195050441367478101310353464249250399393211468032382017479033204215420319027225173414447170427346074048078201158299332476339297492269181214328291096331271222221199421106169418137405411466364104047152090465446480302462385088114481261428257207129020358100073347153355274495263056109229159157348228275180360410147142130230179450079472482323145202198010119F9BFDDF3C203A7E537AB046811BB7CEA37AB046811BB7CEA37AB046811BB7CEAE012403885A8163C0E3E14D7AD6207B5E8CE91579501D84B09D6682339A4DB462F479FFE1B232AFB3D19E925768AF0AA3E62D9AB6F9CEADDB1CDCA351CAA90996631814A556C47270431A6A40891F756FDDCA7BDD05C62A2932F8E77979E0D43C9F12565B1F4BB4F0520B44CC76BAC23F65330AC5966D22B209F32126132F4848E500A013F4DC32306A9620394D40C94B8EBC2406B68EBE31DAB17EF2DF977731A5C41C11311DC36E1FB8BC2529D1AA20D5D46919472212D781B1D77378872CBD14C2A5B783C7ADF0D2680946C52E56E186A7E971E7EAB2CF09511361DD892B5D4A113E8A2C60E3F7FEFA4100753D
82B7064101002937733CE0285C73130635F0CBBDF6F1160C2917B2DF9B1C391A8E9D7D9F380BF31A77A84017D0DF26B35BED6B2D145A051EB4345DA90241CA997828B8393ACD5C7316594634356CCC3986EFDD7776AC62C65E500ED125097142489479219130046503035CloudMOA', now(), null, true); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); + +INSERT INTO public.report_template(id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) VALUES(nextval('hibernate_sequence'), 'admin', '2020-04-28 09:29:49.466', 'admin', '2020-04-28 09:29:49.466', '0 0 1 ? * * *', true, +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network 
Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s Cluster resource usage is displayed.

1. CPU Usage

${metricItem1587977724113}

2. Memory Usage

${metricItem1588037028605}

3. Network

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod


1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}





', 'cloudmoa Cluster Daily Report'); +INSERT INTO public.report_template (id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) +VALUES(nextval('hibernate_sequence'), 'admin', '2020-01-20 01:17:50.182', 'admin', '2020-04-29 08:01:40.841', '0 0 9 ? * * *', false, +'[{"id":"metricItem1579497906163","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_cpu_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1579497916213","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_memory_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Memory Usage (%)","displayType":"bar","unit":"%","data":""},{"id":"metricItem1579497928963","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_network_receive","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Network Receive (KiB)","displayType":"pie","unit":"%","data":""},{"id":"metricItem1579497947243","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_load5","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Load 5m Average","displayType":"table","unit":"%","data":""}]', +'

1. editor usage

Let''s write the editor.

1.1 Text Decoration

Bold
Itelic
Strike


1.2 Color and blockquote

What''s your color?

Today is the first day of the rest of your life

1.3 List

  • Apple
  • Banana

  1. postgre
  2. cassandra
  3. prometheus

[ TODO List ]
  • Create DB table
  • Charge file name

1.4 Link, Table, Image




Deamonset NameAgeNamespaceLabelsImageCPUMemory
imxc-agent5
day
imxcimxc-agentregistry.openstacklocal:5000/imxc/imxc-agent:latest83.151.68
GiB
kube-flannel-ds-amd643
month
kube-systemflannelnodequay.io/coreos/flannel:v0.11.0-amd641.0790.88
MiB
kube-proxy10
month
kube-systemkube-proxyk8s.gcr.io/kube-proxy:v1.16.01.18117.66
MiB
node-exporter10
month
defaultnode-exporternode-exporterprom/node-exporter4.7697.54
MiB

exem.jpg

1.6 Metric Item

${metricItem1579497906163}
${metricItem1579497916213}
${metricItem1579497928963}
${metricItem1579497947243}



















', 'Editor usage example'); + +INSERT INTO public.report_static(id, created_by, created_date, modified_by, modified_date, cron_exp, metric_data, template_data, title, "type", report_template_id) VALUES(10582051, 'admin', '2020-04-29 08:27:52.545', 'admin', '2020-04-29 08:27:52.545', '0 0 1 ? * * *', +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s cluster resource usage flow is shown.

1. CPU Usage

Abnormally high CPU usage by particular programs can be an indication that there is something wrong with the computer system.

${metricItem1587977724113}

2. Memory Usage

The Memory Usage window displays the amount of memory available on your system, as well as the memory currently in use by all applications, including Windows itself.

${metricItem1588037028605}

3. Network

A network transmit/receive provides basic network utilization data in relation to the available network capacity.

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod

1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}







', +'cloudmoa Cluster Daily Report', 'manual', (select id from report_template where title='cloudmoa Cluster Daily Report')); + +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency 
(ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', (select id from auth_resource2 where name='CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service 
TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', (select id from auth_resource2 where name='Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', 
'[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', +(select id from auth_resource3 where 
name='dashboard|admin|CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', +(select id from auth_resource3 where name='dashboard|admin|Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, 
code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, 
code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'cloudmoa-trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'cmoa-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream 
Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +insert into public.log_management (cluster_id, node_id, log_rotate_dir, log_rotate_count, log_rotate_size, log_rotate_management, back_up_dir, back_up_period, back_up_dir_size, back_up_management, created_date, modified_date) values ('cloudmoa', '', '/var/lib/docker', 3, 100, true, '/home/moa/log', 5, 1000, true, '2020-07-30 13:54:52', null); + +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (5, 'metrics-server', 'agent', 'Metrcis-Server는 Kubernetes의 kubelet에 있는 cAdvisor로부터 Container Metric 데이터를 수집하여 Prometheus에 전달하는 역할을 합니다.', null, '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: 
["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + 
serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1', true, '2021-03-11 13:41:48.000000', '2021-03-11 13:41:56.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. +', null, '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', null, '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: 
+ cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', null, '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - 
extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + 
containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.16', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + 
metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: 
; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (3, 'prometheus', 
'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.15', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: 
__meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + 
restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: 
${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); + + +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"', true); + + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), 
param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the 
option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission 
cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); diff --git a/ansible/01_old/roles/test/files/04-keycloak/Chart.yaml b/ansible/01_old/roles/test/files/04-keycloak/Chart.yaml new file mode 100644 index 0000000..a5d4032 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 4.0.0 +description: Modified Authentication Module By EXEM CloudMOA +home: https://www.keycloak.org/ +icon: https://www.keycloak.org/resources/images/keycloak_logo_480x108.png +keywords: +- sso +- idm +- openid connect +- saml +- kerberos +- ldap +maintainers: +- email: unguiculus@gmail.com + name: unguiculus +- email: thomas.darimont+github@gmail.com + name: thomasdarimont +name: keycloak +sources: +- https://github.com/codecentric/helm-charts +- https://github.com/jboss-dockerfiles/keycloak +- https://github.com/bitnami/charts/tree/master/bitnami/postgresql +version: 11.0.1 diff --git a/ansible/01_old/roles/test/files/04-keycloak/OWNERS b/ansible/01_old/roles/test/files/04-keycloak/OWNERS new file mode 100644 index 0000000..8c2ff0d --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/OWNERS @@ -0,0 +1,6 @@ +approvers: + - unguiculus + - thomasdarimont +reviewers: + - unguiculus + - thomasdarimont diff --git 
a/ansible/01_old/roles/test/files/04-keycloak/README.md b/ansible/01_old/roles/test/files/04-keycloak/README.md new file mode 100644 index 0000000..5f8da10 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/README.md @@ -0,0 +1,765 @@ +# Keycloak + +[Keycloak](http://www.keycloak.org/) is an open source identity and access management for modern applications and services. + +## TL;DR; + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Introduction + +This chart bootstraps a [Keycloak](http://www.keycloak.org/) StatefulSet on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +It provisions a fully featured Keycloak installation. +For more information on Keycloak and its capabilities, see its [documentation](http://www.keycloak.org/documentation.html). + +## Prerequisites Details + +The chart has an optional dependency on the [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart. +By default, the PostgreSQL chart requires PV support on underlying infrastructure (may be disabled). + +## Installing the Chart + +To install the chart with the release name `keycloak`: + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Uninstalling the Chart + +To uninstall the `keycloak` deployment: + +```console +$ helm uninstall keycloak +``` + +## Configuration + +The following table lists the configurable parameters of the Keycloak chart and their default values. 
+ +| Parameter | Description | Default | +|---|---|---| +| `fullnameOverride` | Optionally override the fully qualified name | `""` | +| `nameOverride` | Optionally override the name | `""` | +| `replicas` | The number of replicas to create | `1` | +| `image.repository` | The Keycloak image repository | `docker.io/jboss/keycloak` | +| `image.tag` | Overrides the Keycloak image tag whose default is the chart version | `""` | +| `image.pullPolicy` | The Keycloak image pull policy | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets for the Pod | `[]` | +| `hostAliases` | Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files | `[]` | +| `enableServiceLinks` | Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links | `true` | +| `podManagementPolicy` | Pod management policy. One of `Parallel` or `OrderedReady` | `Parallel` | +| `restartPolicy` | Pod restart policy. One of `Always`, `OnFailure`, or `Never` | `Always` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | `""` | +| `serviceAccount.annotations` | Additional annotations for the ServiceAccount | `{}` | +| `serviceAccount.labels` | Additional labels for the ServiceAccount | `{}` | +| `serviceAccount.imagePullSecrets` | Image pull secrets that are attached to the ServiceAccount | `[]` | +| `rbac.create` | Specifies whether RBAC resources are to be created | `false` +| `rbac.rules` | Custom RBAC rules, e. g. for KUBE_PING | `[]` +| `podSecurityContext` | SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. 
This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) | `{"fsGroup":1000}` | +| `securityContext` | SecurityContext for the Keycloak container | `{"runAsNonRoot":true,"runAsUser":1000}` | +| `extraInitContainers` | Additional init containers, e. g. for providing custom themes | `[]` | +| `extraContainers` | Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy | `[]` | +| `lifecycleHooks` | Lifecycle hooks for the Keycloak container | `{}` | +| `terminationGracePeriodSeconds` | Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance | `60` | +| `clusterDomain` | The internal Kubernetes cluster domain | `cluster.local` | +| `command` | Overrides the default entrypoint of the Keycloak container | `[]` | +| `args` | Overrides the default args for the Keycloak container | `[]` | +| `extraEnv` | Additional environment variables for Keycloak | `""` | +| `extraEnvFrom` | Additional environment variables for Keycloak mapped from a Secret or ConfigMap | `""` | +| `priorityClassName` | Pod priority class name | `""` | +| `affinity` | Pod affinity | Hard node and soft zone anti-affinity | +| `nodeSelector` | Node labels for Pod assignment | `{}` | +| `tolerations` | Node taints to tolerate | `[]` | +| `podLabels` | Additional Pod labels | `{}` | +| `podAnnotations` | Additional Pod annotations | `{}` | +| `livenessProbe` | Liveness probe configuration | `{"httpGet":{"path":"/health/live","port":"http"},"initialDelaySeconds":300,"timeoutSeconds":5}` | +| `readinessProbe` | Readiness probe configuration | `{"httpGet":{"path":"/auth/realms/master","port":"http"},"initialDelaySeconds":30,"timeoutSeconds":1}` | +| `resources` | Pod resource requests and limits | `{}` | +| `startupScripts` | Startup scripts to run before Keycloak starts 
up | `{"keycloak.cli":"{{- .Files.Get "scripts/keycloak.cli" \| nindent 2 }}"}` | +| `extraVolumes` | Add additional volumes, e. g. for custom themes | `""` | +| `extraVolumeMounts` | Add additional volumes mounts, e. g. for custom themes | `""` | +| `extraPorts` | Add additional ports, e. g. for admin console or exposing JGroups ports | `[]` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `statefulsetAnnotations` | Annotations for the StatefulSet | `{}` | +| `statefulsetLabels` | Additional labels for the StatefulSet | `{}` | +| `secrets` | Configuration for secrets that should be created | `{}` | +| `service.annotations` | Annotations for headless and HTTP Services | `{}` | +| `service.labels` | Additional labels for headless and HTTP Services | `{}` | +| `service.type` | The Service type | `ClusterIP` | +| `service.loadBalancerIP` | Optional IP for the load balancer. Used for services of type LoadBalancer only | `""` | +| `loadBalancerSourceRanges` | Optional List of allowed source ranges (CIDRs). Used for service of type LoadBalancer only | `[]` | +| `service.httpPort` | The http Service port | `80` | +| `service.httpNodePort` | The HTTP Service node port if type is NodePort | `""` | +| `service.httpsPort` | The HTTPS Service port | `8443` | +| `service.httpsNodePort` | The HTTPS Service node port if type is NodePort | `""` | +| `service.httpManagementPort` | The WildFly management Service port | `8443` | +| `service.httpManagementNodePort` | The WildFly management node port if type is NodePort | `""` | +| `service.extraPorts` | Additional Service ports, e. g. for custom admin console | `[]` | +| `service.sessionAffinity` | sessionAffinity for Service, e. g. 
"ClientIP" | `""` | +| `service.sessionAffinityConfig` | sessionAffinityConfig for Service | `{}` | +| `ingress.enabled` | If `true`, an Ingress is created | `false` | +| `ingress.rules` | List of Ingress Ingress rule | see below | +| `ingress.rules[0].host` | Host for the Ingress rule | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.rules[0].paths` | Paths for the Ingress rule | `[/]` | +| `ingress.servicePort` | The Service port targeted by the Ingress | `http` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Additional Ingress labels | `{}` | +| `ingress.tls` | TLS configuration | see below | +| `ingress.tls[0].hosts` | List of TLS hosts | `[keycloak.example.com]` | +| `ingress.tls[0].secretName` | Name of the TLS secret | `""` | +| `ingress.console.enabled` | If `true`, an Ingress for the console is created | `false` | +| `ingress.console.rules` | List of Ingress Ingress rule for the console | see below | +| `ingress.console.rules[0].host` | Host for the Ingress rule for the console | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.console.rules[0].paths` | Paths for the Ingress rule for the console | `[/auth/admin]` | +| `ingress.console.annotations` | Ingress annotations for the console | `{}` | +| `networkPolicy.enabled` | If true, the ingress network policy is deployed | `false` +| `networkPolicy.extraFrom` | Allows to define allowed external traffic (see Kubernetes doc for network policy `from` format) | `[]` +| `route.enabled` | If `true`, an OpenShift Route is created | `false` | +| `route.path` | Path for the Route | `/` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Additional Route labels | `{}` | +| `route.host` | Host name for the Route | `""` | +| `route.tls.enabled` | If `true`, TLS is enabled for the Route | `true` | +| `route.tls.insecureEdgeTerminationPolicy` | Insecure edge termination policy of the Route. 
Can be `None`, `Redirect`, or `Allow` | `Redirect` | +| `route.tls.termination` | TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` | `edge` | +| `pgchecker.image.repository` | Docker image used to check Postgresql readiness at startup | `docker.io/busybox` | +| `pgchecker.image.tag` | Image tag for the pgchecker image | `1.32` | +| `pgchecker.image.pullPolicy` | Image pull policy for the pgchecker image | `IfNotPresent` | +| `pgchecker.securityContext` | SecurityContext for the pgchecker container | `{"allowPrivilegeEscalation":false,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | +| `pgchecker.resources` | Resource requests and limits for the pgchecker container | `{"limits":{"cpu":"10m","memory":"16Mi"},"requests":{"cpu":"10m","memory":"16Mi"}}` | +| `postgresql.enabled` | If `true`, the Postgresql dependency is enabled | `true` | +| `postgresql.postgresqlUsername` | PostgreSQL User to create | `keycloak` | +| `postgresql.postgresqlPassword` | PostgreSQL Password for the new user | `keycloak` | +| `postgresql.postgresqlDatabase` | PostgreSQL Database to create | `keycloak` | +| `serviceMonitor.enabled` | If `true`, a ServiceMonitor resource for the prometheus-operator is created | `false` | +| `serviceMonitor.namespace` | Optionally sets a target namespace in which to deploy the ServiceMonitor resource | `""` | +| `serviceMonitor.namespaceSelector` | Optionally sets a namespace selector for the ServiceMonitor | `{}` | +| `serviceMonitor.annotations` | Annotations for the ServiceMonitor | `{}` | +| `serviceMonitor.labels` | Additional labels for the ServiceMonitor | `{}` | +| `serviceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `serviceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `serviceMonitor.path` | The path at which metrics are served | `/metrics` | +| `serviceMonitor.port` | The Service port at which metrics are served | `http` | +| `extraServiceMonitor.enabled` | If `true`, 
an additional ServiceMonitor resource for the prometheus-operator is created. Could be used for additional metrics via [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) | `false` | +| `extraServiceMonitor.namespace` | Optionally sets a target namespace in which to deploy the additional ServiceMonitor resource | `""` | +| `extraServiceMonitor.namespaceSelector` | Optionally sets a namespace selector for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.annotations` | Annotations for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.labels` | Additional labels for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `extraServiceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `extraServiceMonitor.path` | The path at which metrics are served | `/metrics` | +| `extraServiceMonitor.port` | The Service port at which metrics are served | `http` | +| `prometheusRule.enabled` | If `true`, a PrometheusRule resource for the prometheus-operator is created | `false` | +| `prometheusRule.annotations` | Annotations for the PrometheusRule | `{}` | +| `prometheusRule.labels` | Additional labels for the PrometheusRule | `{}` | +| `prometheusRule.rules` | List of rules for Prometheus | `[]` | +| `autoscaling.enabled` | Enable creation of a HorizontalPodAutoscaler resource | `false` | +| `autoscaling.labels` | Additional labels for the HorizontalPodAutoscaler resource | `{}` | +| `autoscaling.minReplicas` | The minimum number of Pods when autoscaling is enabled | `3` | +| `autoscaling.maxReplicas` | The maximum number of Pods when autoscaling is enabled | `10` | +| `autoscaling.metrics` | The metrics configuration for the HorizontalPodAutoscaler | `[{"resource":{"name":"cpu","target":{"averageUtilization":80,"type":"Utilization"}},"type":"Resource"}]` | +| `autoscaling.behavior` | The scaling policy configuration for the HorizontalPodAutoscaler | 
`{"scaleDown":{"policies":[{"periodSeconds":300,"type":"Pods","value":1}],"stabilizationWindowSeconds":300}` | +| `test.enabled` | If `true`, test resources are created | `false` | +| `test.image.repository` | The image for the test Pod | `docker.io/unguiculus/docker-python3-phantomjs-selenium` | +| `test.image.tag` | The tag for the test Pod image | `v1` | +| `test.image.pullPolicy` | The image pull policy for the test Pod image | `IfNotPresent` | +| `test.podSecurityContext` | SecurityContext for the entire test Pod | `{"fsGroup":1000}` | +| `test.securityContext` | SecurityContext for the test container | `{"runAsNonRoot":true,"runAsUser":1000}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --set replicas=1 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --values values.yaml +``` + +The chart offers great flexibility. +It can be configured to work with the official Keycloak Docker image but any custom image can be used as well. + +For the offical Docker image, please check it's configuration at https://github.com/keycloak/keycloak-containers/tree/master/server. + +### Usage of the `tpl` Function + +The `tpl` function allows us to pass string values from `values.yaml` through the templating engine. +It is used for the following values: + +* `extraInitContainers` +* `extraContainers` +* `extraEnv` +* `extraEnvFrom` +* `affinity` +* `extraVolumeMounts` +* `extraVolumes` +* `livenessProbe` +* `readinessProbe` + +Additionally, custom labels and annotations can be set on various resources the values of which being passed through `tpl` as well. + +It is important that these values be configured as strings. +Otherwise, installation will fail. 
+See example for Google Cloud Proxy or default affinity configuration in `values.yaml`. + +### JVM Settings + +Keycloak sets the following system properties by default: +`-Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS -Djava.awt.headless=true` + +You can override these by setting the `JAVA_OPTS` environment variable. +Make sure you configure container support. +This allows you to only configure memory using Kubernetes resources and the JVM will automatically adapt. + +```yaml +extraEnv: | + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true +``` + +### Database Setup + +By default, Bitnami's [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart is deployed and used as database. +Please refer to this chart for additional PostgreSQL configuration options. + +#### Using an External Database + +The Keycloak Docker image supports various database types. +Configuration happens in a generic manner. + +##### Using a Secret Managed by the Chart + +The following examples uses a PostgreSQL database with a secret that is managed by the Helm chart. + +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + +extraEnvFrom: | + - secretRef: + name: '{{ include "keycloak.fullname" . }}-db' + +secrets: + db: + stringData: + DB_USER: '{{ .Values.dbUser }}' + DB_PASSWORD: '{{ .Values.dbPassword }}' +``` + +`dbUser` and `dbPassword` are custom values you'd then specify on the commandline using `--set-string`. + +##### Using an Existing Secret + +The following examples uses a PostgreSQL database with a secret. +Username and password are mounted as files. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + - name: DB_USER_FILE + value: /secrets/db-creds/user + - name: DB_PASSWORD_FILE + value: /secrets/db-creds/password + +extraVolumeMounts: | + - name: db-creds + mountPath: /secrets/db-creds + readOnly: true + +extraVolumes: | + - name: db-creds + secret: + secretName: keycloak-db-creds +``` + +### Creating a Keycloak Admin User + +The Keycloak Docker image supports creating an initial admin user. +It must be configured via environment variables: + +* `KEYCLOAK_USER` or `KEYCLOAK_USER_FILE` +* `KEYCLOAK_PASSWORD` or `KEYCLOAK_PASSWORD_FILE` + +Please refer to the section on database configuration for how to configure a secret for this. + +### High Availability and Clustering + +For high availability, Keycloak must be run with multiple replicas (`replicas > 1`). +The chart has a helper template (`keycloak.serviceDnsName`) that creates the DNS name based on the headless service. + +#### DNS_PING Service Discovery + +JGroups discovery via DNS_PING can be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +#### KUBE_PING Service Discovery + +Recent versions of Keycloak include a new Kubernetes native [KUBE_PING](https://github.com/jgroups-extras/jgroups-kubernetes) service discovery protocol. +This requires a little more configuration than DNS_PING but can easily be achieved with the Helm chart. 
+ +As with DNS_PING some environment variables must be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +However, the Keycloak Pods must also get RBAC permissions to `get` and `list` Pods in the namespace which can be configured as follows: + +```yaml +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +``` + +#### Autoscaling + +Due to the caches in Keycloak only replicating to a few nodes (two in the example configuration above) and the limited controls around autoscaling built into Kubernetes, it has historically been problematic to autoscale Keycloak. +However, in Kubernetes 1.18 [additional controls were introduced](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior) which make it possible to scale down in a more controlled manner. + +The example autoscaling configuration in the values file scales from three up to a maximum of ten Pods using CPU utilization as the metric. Scaling up is done as quickly as required but scaling down is done at a maximum rate of one Pod per five minutes. + +Autoscaling can be enabled as follows: + +```yaml +autoscaling: + enabled: true +``` + +KUBE_PING service discovery seems to be the most reliable mechanism to use when enabling autoscaling, due to being faster than DNS_PING at detecting changes in the cluster. 
+ +### Running Keycloak Behind a Reverse Proxy + +When running Keycloak behind a reverse proxy, which is the case when using an ingress controller, +proxy address forwarding must be enabled as follows: + +```yaml +extraEnv: | + - name: PROXY_ADDRESS_FORWARDING + value: "true" +``` + +### Providing a Custom Theme + +One option is certainly to provide a custom Keycloak image that includes the theme. +However, if you prefer to stick with the official Keycloak image, you can use an init container as theme provider. + +Create your own theme and package it up into a Docker image. + +```docker +FROM busybox +COPY mytheme /mytheme +``` + +In combination with an `emptyDir` that is shared with the Keycloak container, configure an init container that runs your theme image and copies the theme over to the right place where Keycloak will pick it up automatically. + +```yaml +extraInitContainers: | + - name: theme-provider + image: myuser/mytheme:1 + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes/mytheme + +extraVolumes: | + - name: theme + emptyDir: {} +``` + +### Setting a Custom Realm + +A realm can be added by creating a secret or configmap for the realm json file and then supplying this into the chart. +It can be mounted using `extraVolumeMounts` and then referenced as environment variable `KEYCLOAK_IMPORT`. 
+First we need to create a Secret from the realm JSON file using `kubectl create secret generic realm-secret --from-file=realm.json` which we need to reference in `values.yaml`: + +```yaml +extraVolumes: | + - name: realm-secret + secret: + secretName: realm-secret + +extraVolumeMounts: | + - name: realm-secret + mountPath: "/realm/" + readOnly: true + +extraEnv: | + - name: KEYCLOAK_IMPORT + value: /realm/realm.json +``` + +Alternatively, the realm file could be added to a custom image. + +After startup the web admin console for the realm should be available on the path /auth/admin/\/console/. + +### Using Google Cloud SQL Proxy + +Depending on your environment you may need a local proxy to connect to the database. +This is, e. g., the case for Google Kubernetes Engine when using Google Cloud SQL. +Create the secret for the credentials as documented [here](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) and configure the proxy as a sidecar. + +Because `extraContainers` is a string that is passed through the `tpl` function, it is possible to create custom values and use them in the string. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +# Custom values for Google Cloud SQL +cloudsql: + project: my-project + region: europe-west1 + instance: my-instance + +extraContainers: | + - name: cloudsql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - /cloud_sql_proxy + args: + - -instances={{ .Values.cloudsql.project }}:{{ .Values.cloudsql.region }}:{{ .Values.cloudsql.instance }}=tcp:5432 + - -credential_file=/secrets/cloudsql/credentials.json + volumeMounts: + - name: cloudsql-creds + mountPath: /secrets/cloudsql + readOnly: true + +extraVolumes: | + - name: cloudsql-creds + secret: + secretName: cloudsql-instance-credentials + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: "127.0.0.1" + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: postgres + - name: DB_USER + value: myuser + - name: DB_PASSWORD + value: mypassword +``` + +### Changing the Context Path + +By default, Keycloak is served under context `/auth`. 
+This can be changed as follows: + +```yaml +contextPath: mycontext + +startupScripts: + # cli script that reconfigures WildFly + contextPath.cli: | + embed-server --server-config=standalone-ha.xml --std-out=echo + batch + {{- if ne .Values.contextPath "auth" }} + /subsystem=keycloak-server/:write-attribute(name=web-context,value={{ if eq .Values.contextPath "" }}/{{ else }}{{ .Values.contextPath }}{{ end }}) + {{- if eq .Values.contextPath "" }} + /subsystem=undertow/server=default-server/host=default-host:write-attribute(name=default-web-module,value=keycloak-server.war) + {{- end }} + {{- end }} + run-batch + stop-embedded-server + +livenessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +readinessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +The above YAML references introduces the custom value `contextPath` which is possible because `startupScripts`, `livenessProbe`, and `readinessProbe` are templated using the `tpl` function. +Note that it must not start with a slash. +Alternatively, you may supply it via CLI flag: + +```console +--set-string contextPath=mycontext +``` + +### Prometheus Metrics Support + +#### WildFly Metrics + +WildFly can expose metrics on the management port. +In order to achieve this, the environment variable `KEYCLOAK_STATISTICS` must be set. + +```yaml +extraEnv: | + - name: KEYCLOAK_STATISTICS + value: all +``` + +Add a ServiceMonitor if using prometheus-operator: + +```yaml +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing the ServiceMonitor and for adding custom Prometheus rules. 
+ +Add annotations if you don't use prometheus-operator: + +```yaml +service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9990" +``` + +#### Keycloak Metrics SPI + +Optionally, it is possible to add [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) via init container. + +A separate `ServiceMonitor` can be enabled to scrape metrics from the SPI: + +```yaml +extraServiceMonitor: + # If `true`, an additional ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing this ServiceMonitor. + +Note that the metrics endpoint is exposed on the HTTP port. +You may want to restrict access to it in your ingress controller configuration. +For ingress-nginx, this could be done as follows: + +```yaml +annotations: + nginx.ingress.kubernetes.io/server-snippet: | + location ~* /auth/realms/[^/]+/metrics { + return 403; + } +``` + +## Why StatefulSet? + +The chart sets node identifiers to the system property `jboss.node.name` which is in fact the pod name. +Node identifiers must not be longer than 23 characters. +This can be problematic because pod names are quite long. +We would have to truncate the chart's fullname to six characters because pods get a 17-character suffix (e. g. `-697f8b7655-mf5ht`). +Using a StatefulSet allows us to truncate to 20 characters leaving room for up to 99 replicas, which is much better. +Additionally, we get stable values for `jboss.node.name` which can be advantageous for cluster discovery. +The headless service that governs the StatefulSet is used for DNS discovery via DNS_PING. + +## Upgrading + +### From chart < 10.0.0 + +* Keycloak is updated to 12.0.4 + +The upgrade should be seemless. +No special care has to be taken. + +### From chart versions < 9.0.0 + +The Keycloak chart received a major facelift and, thus, comes with breaking changes. +Opinionated stuff and things that are now baked into Keycloak's Docker image were removed. 
+Configuration is more generic making it easier to use custom Docker images that are configured differently than the official one. + +* Values are no longer nested under `keycloak`. +* Besides setting the node identifier, no CLI changes are performed out of the box +* Environment variables for the Postresql dependency are set automatically if enabled. + Otherwise, no environment variables are set by default. +* Optionally enables creating RBAC resources with configurable rules (e. g. for KUBE_PING) +* PostgreSQL chart dependency is updated to 9.1.1 + +### From chart versions < 8.0.0 + +* Keycloak is updated to 10.0.0 +* PostgreSQL chart dependency is updated to 8.9.5 + +The upgrade should be seemless. +No special care has to be taken. + +### From chart versions < 7.0.0 + +Version 7.0.0 update breaks backwards-compatibility with the existing `keycloak.persistence.existingSecret` scheme. + +#### Changes in Configuring Database Credentials from an Existing Secret + +Both `DB_USER` and `DB_PASS` are always read from a Kubernetes Secret. +This is a requirement if you are provisioning database credentials dynamically - either via an Operator or some secret-management engine. + +The variable referencing the password key name has been renamed from `keycloak.persistence.existingSecretKey` to `keycloak.persistence.existingSecretPasswordKey` + +A new, optional variable for referencing the username key name for populating the `DB_USER` env has been added: +`keycloak.persistence.existingSecretUsernameKey`. + +If `keycloak.persistence.existingSecret` is left unset, a new Secret will be provisioned populated with the `dbUser` and `dbPassword` Helm variables. + +###### Example configuration: +```yaml +keycloak: + persistence: + existingSecret: keycloak-provisioned-db-credentials + existingSecretPasswordKey: PGPASSWORD + existingSecretUsernameKey: PGUSER + ... 
+``` +### From chart versions < 6.0.0 + +#### Changes in Probe Configuration + +Now both readiness and liveness probes are configured as strings that are then passed through the `tpl` function. +This allows for greater customizability of the readiness and liveness probes. + +The defaults are unchanged, but since 6.0.0 configured as follows: + +```yaml + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +#### Changes in Existing Secret Configuration + +This can be useful if you create a secret in a parent chart and want to reference that secret. +Applies to `keycloak.existingSecret` and `keycloak.persistence.existingSecret`. + +_`values.yaml` of parent chart:_ +```yaml +keycloak: + keycloak: + existingSecret: '{{ .Release.Name }}-keycloak-secret' +``` + +#### HTTPS Port Added + +The HTTPS port was added to the pod and to the services. +As a result, service ports are now configured differently. + + +### From chart versions < 5.0.0 + +Version 5.0.0 is a major update. + +* The chart now follows the new Kubernetes label recommendations: +https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +* Several changes to the StatefulSet render an out-of-the-box upgrade impossible because StatefulSets only allow updates to a limited set of fields +* The chart uses the new support for running scripts at startup that has been added to Keycloak's Docker image. +If you use this feature, you will have to adjust your configuration + +However, with the following manual steps an automatic upgrade is still possible: + +1. Adjust chart configuration as necessary (e. g. startup scripts) +1. 
Perform a non-cascading deletion of the StatefulSet which keeps the pods running +1. Add the new labels to the pods +1. Run `helm upgrade` + +Use a script like the following to add labels and to delete the StatefulSet: + +```console +#!/bin/sh + +release= +namespace= + +kubectl delete statefulset -n "$namespace" -l app=keycloak -l release="$release" --cascade=false + +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/name=keycloak +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/instance="$release" +``` + +**NOTE:** Version 5.0.0 also updates the Postgresql dependency which has received a major upgrade as well. +In case you use this dependency, the database must be upgraded first. +Please refer to the Postgresql chart's upgrading section in its README for instructions. diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/.helmignore b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/Chart.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..48d8f2f --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.8.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.1.1 diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/README.md b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..c84cc7b --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/README.md @@ -0,0 +1,625 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+ +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). 
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. 
|`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+* First, create the secret with the certificates files:
+
+  ```console
+  kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+  ```
+
+* Then, use the following parameters:
+
+  ```console
+  volumePermissions.enabled=true
+  tls.enabled=true
+  tls.certificatesSecret="certificates-tls-secret"
+  tls.certFilename="cert.crt"
+  tls.certKeyFilename="cert.key"
+  ```
+
+  > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters:
+
+```
+postgresql.postgresqlPassword=testtest
+subchart1.postgresql.postgresqlPassword=testtest
+subchart2.postgresql.postgresqlPassword=testtest
+postgresql.postgresqlDatabase=db1
+subchart1.postgresql.postgresqlDatabase=db1
+subchart2.postgresql.postgresqlDatabase=db1
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.postgresqlPassword=testtest
+global.postgresql.postgresqlDatabase=db1
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please convert them to sql and import after `helm install` finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation.
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. 
It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. 
Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/.helmignore b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..b4d8828 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 0.3.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.3.1 diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/README.md b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/README.md new file mode 100644 index 0000000..ab50967 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/README.md @@ -0,0 +1,228 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR; + +```yaml +dependencies: + - name: common + version: 0.1.0 + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +**Names** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +**TplValues** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +**Secrets** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +## Notable changes + +N/A diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..c0ea2c7 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..ee6673a --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = 
.global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $) }} +*/}} +{{- define "common.images.pullSecrets" -}} +{{- if .global }} +{{- if .global.imagePullSecrets }} +imagePullSecrets: + {{- range .global.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- else }} +{{- $pullSecrets := list }} +{{- range .images }} + {{- if .pullSecrets }} + {{- $pullSecrets = append $pullSecrets .pullSecrets }} + {{- end }} +{{- end }} +{{- if $pullSecrets }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..d6165a2 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. 
The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that 
contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/values.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..a936299 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/default-values.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/README.md b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/conf.d/README.md b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. 
\ No newline at end of file diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.lock b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.lock new file mode 100644 index 0000000..1069b62 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.3.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-07-15T00:56:02.067804177Z" diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..868eee6 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/NOTES.txt b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..6dec604 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,54 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.imxc.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . 
}}-read.imxc.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace imxc --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . 
}}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/_helpers.tpl b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..a7008a1 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,494 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- 
.Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . 
| quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/configmap.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..b29ef60 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..f21a976 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..6637867 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . 
}} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6b7a317 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..b993c99 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..2a7b372 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..da0b3ab --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..b0c41b1 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and 
.Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/pv.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/pv.yaml new file mode 100644 index 0000000..ddd7d7c --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/pv.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: keycloak-saas +spec: + storageClassName: manual + capacity: + storage: 8Gi + accessModes: + - ReadWriteOnce + #- ReadWriteMany + hostPath: + #path: "/home/keycloak/keycloak" + path: /mnt/keycloak-postgresql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + #- imxc-worker1 + - {{ .Values.node.affinity }} + claimRef: + name: data-keycloak-saas-postgresql-0 + #namespace: auth + diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/role.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..6d3cf50 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if 
.Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..b7daa2a --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: imxc +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/secrets.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..c93dbe0 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) 
}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..17f7ff3 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..3e643e1 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - imxc + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..a712a03 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,340 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if 
.Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..35c6293 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: 
{{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + #claimName: {{ tpl . 
$ }} + claimName: data-keycloak-saas-postgresql-0 +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..4913157 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-read.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..885c7bb --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..e9fc504 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values-production.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..a43670f --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values-production.yaml @@ -0,0 +1,591 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: postgresql # bitnami/postgresql + tag: 11.8.0-debian-10-r61 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.schema.json b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + 
"$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.yaml b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.yaml new 
file mode 100644 index 0000000..5f831ef --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/charts/postgresql/values.yaml @@ -0,0 +1,604 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + #registry: cdm-dev.exem-oss.org/keycloak + registry: 10.10.31.243:5000/keycloak # registry.openstacklocal:5000/keycloak + repository: keycloak-postgresql + tag: 11.8.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + #registry: cdm-dev.exem-oss.org + registry: 10.10.31.243:5000 # registry.openstacklocal:5000 + repository: minideb # keycloak/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data +#postgresqlDataDir: /var/lib/postgresql/data/pgdata + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + 
## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + 
requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +#persistentVolume nodeAffinity Value Require this value +node: + affinity: imxc-worker1 diff --git a/ansible/01_old/roles/test/files/04-keycloak/ci/h2-values.yaml b/ansible/01_old/roles/test/files/04-keycloak/ci/h2-values.yaml new file mode 100644 index 0000000..10d1705 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/04-keycloak/ci/h2-values.yaml @@ -0,0 +1,38 @@ +extraEnv: | + - name: DB_VENDOR + value: h2 + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + annotations: + my-test-annotation: Test secret for {{ include "keycloak.fullname" . }} + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: false + +test: + enabled: true diff --git a/ansible/01_old/roles/test/files/04-keycloak/ci/postgres-ha-values.yaml b/ansible/01_old/roles/test/files/04-keycloak/ci/postgres-ha-values.yaml new file mode 100644 index 0000000..e92c2c7 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/ci/postgres-ha-values.yaml @@ -0,0 +1,73 @@ +replicas: 2 + +podLabels: + test-label: test-label-value + +podAnnotations: + test-annotation: test-annotation-value-{{ .Release.Name }} + test-int-annotation: "12345" + +startupScripts: + hello.sh: | + #!/bin/sh + + echo '********************************************************************************' + echo '* *' + echo '* Hello from my startup script! *' + echo '* *' + echo '********************************************************************************' + +lifecycleHooks: | + postStart: + exec: + command: + - /bin/sh + - -c + - echo 'Hello from lifecycle hook!' + +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: KEYCLOAK_STATISTICS + value: all + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: true + persistence: + enabled: true + +test: + enabled: true diff --git a/ansible/01_old/roles/test/files/04-keycloak/requirements.lock b/ansible/01_old/roles/test/files/04-keycloak/requirements.lock new file mode 100644 index 0000000..4231a57 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.1 +digest: sha256:33ee9e6caa9e519633071fd71aedd9de7906b9a9d7fb629eb814d9f72bb8d68e +generated: "2020-07-24T07:40:55.78753+02:00" diff --git a/ansible/01_old/roles/test/files/04-keycloak/requirements.yaml b/ansible/01_old/roles/test/files/04-keycloak/requirements.yaml new file mode 100644 index 0000000..f3409a3 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 9.1.1 + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled diff --git a/ansible/01_old/roles/test/files/04-keycloak/scripts/keycloak.cli b/ansible/01_old/roles/test/files/04-keycloak/scripts/keycloak.cli new file mode 100644 index 0000000..1469963 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/04-keycloak/scripts/keycloak.cli @@ -0,0 +1,13 @@ +embed-server --server-config=standalone-ha.xml --std-out=echo +batch + +echo Configuring node identifier + +## Sets the node identifier to the node name (= pod name). Node identifiers have to be unique. They can have a +## maximum length of 23 characters. Thus, the chart's fullname template truncates its length accordingly. +/subsystem=transactions:write-attribute(name=node-identifier, value=${jboss.node.name}) + +echo Finished configuring node identifier + +run-batch +stop-embedded-server diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/NOTES.txt b/ansible/01_old/roles/test/files/04-keycloak/templates/NOTES.txt new file mode 100644 index 0000000..e76e064 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/NOTES.txt @@ -0,0 +1,61 @@ +*********************************************************************** +* * +* Keycloak Helm Chart by codecentric AG * +* * +*********************************************************************** + +{{- if .Values.ingress.enabled }} + +Keycloak was installed with an Ingress and an be reached at the following URL(s): +{{ range $unused, $rule := .Values.ingress.rules }} + {{- range $rule.paths }} + - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $rule.host }}{{ . }} + {{- end }} +{{- end }} + +{{- else if eq "NodePort" .Values.service.type }} + +Keycloak was installed with a Service of type NodePort. +{{ if .Values.service.httpNodePort }} +Get its HTTP URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . 
}}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"http\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} +{{ if .Values.service.httpsNodePort }} +Get its HTTPS URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"https\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} + +{{- else if eq "LoadBalancer" .Values.service.type }} + +Keycloak was installed with a Service of type LoadBalancer + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace imxc service -w {{ include "keycloak.fullname" . }}' + +Get its HTTP URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpPort }}" + +Get its HTTPS URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpsPort }}" + +{{- else if eq "ClusterIP" .Values.service.type }} + +Keycloak was installed with a Service of type ClusterIP + +Create a port-forwarding with the following commands: + +export POD_NAME=$(kubectl get pods --namespace imxc -l "app.kubernetes.io/name={{ include "keycloak.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o name) +echo "Visit http://127.0.0.1:8080 to use your application" +kubectl --namespace imxc port-forward "$POD_NAME" 8080 + +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/_helpers.tpl b/ansible/01_old/roles/test/files/04-keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..d019e17 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "keycloak.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate to 20 characters because this is used to set the node identifier in WildFly which is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "keycloak.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "keycloak.labels" -}} +helm.sh/chart: {{ include "keycloak.chart" . }} +{{ include "keycloak.selectorLabels" . }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "keycloak.selectorLabels" -}} +app.kubernetes.io/name: {{ include "keycloak.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for the postgres requirement. +*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- $postgresContext := dict "Values" .Values.postgresql "Release" .Release "Chart" (dict "Name" "postgresql") -}} +{{ include "postgresql.fullname" $postgresContext }} +{{- end }} + +{{/* +Create the service DNS name. +*/}} +{{- define "keycloak.serviceDnsName" -}} +{{ include "keycloak.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "keycloak.ingressAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- print "networking.k8s.io/v1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/configmap-startup.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/configmap-startup.yaml new file mode 100644 index 0000000..8fbb462 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/configmap-startup.yaml @@ -0,0 +1,14 @@ +{{- if .Values.startupScripts }} +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-startup + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} +data: + {{- range $key, $value := .Values.startupScripts }} + {{ $key }}: | + {{- tpl $value $ | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/hpa.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/hpa.yaml new file mode 100644 index 0000000..c772b76 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.autoscaling.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "keycloak.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/ingress.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d749e24 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/ingress.yaml @@ -0,0 +1,104 @@ +{{- $ingress := .Values.ingress -}} +{{- if $ingress.enabled -}} +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- if $ingress.console.enabled }} +--- +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }}-console + {{- with $ingress.console.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.console.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/networkpolicy.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..5e7c7b6 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/networkpolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "keycloak.fullname" . | quote }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.networkPolicy.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + ingress: + {{- with .Values.networkPolicy.extraFrom }} + - from: + {{- toYaml . | nindent 8 }} + ports: + - protocol: TCP + port: {{ $.Values.service.httpPort }} + - protocol: TCP + port: {{ $.Values.service.httpsPort }} + {{ range $.Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} + {{- end }} + - from: + - podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.service.httpPort }} + - protocol: TCP + port: {{ .Values.service.httpsPort }} + - protocol: TCP + port: {{ .Values.service.httpManagementPort }} + {{ range .Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/poddisruptionbudget.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..39cc390 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/prometheusrule.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/prometheusrule.yaml new file mode 100644 index 0000000..69af5e7 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- with .Values.prometheusRule -}} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "keycloak.fullname" $ }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "keycloak.fullname" $ }} + rules: + {{- toYaml .rules | nindent 8 }} +{{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/rbac.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/rbac.yaml new file mode 100644 index 0000000..9ca0a2b --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/rbac.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create .Values.rbac.rules }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +rules: + {{- toYaml .Values.rbac.rules | nindent 2 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "keycloak.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/route.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/route.yaml new file mode 100644 index 0000000..9507d56 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/route.yaml @@ -0,0 +1,34 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $route.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $route.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $route.host }} + host: {{ tpl $route.host $ | quote }} +{{- end }} + path: {{ $route.path }} + port: + targetPort: http + to: + kind: Service + name: {{ include "keycloak.fullname" $ }}-http + weight: 100 + {{- if $route.tls.enabled }} + tls: + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + termination: {{ $route.tls.termination }} + {{- end }} +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/secrets.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/secrets.yaml new file mode 100644 index 0000000..c1cb796 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- range $nameSuffix, $values := .Values.secrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $nameSuffix }} + {{- with $values.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := $values.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +type: {{ default "Opaque" $values.type }} +{{- with $values.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with $values.stringData }} +stringData: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 2 }} + {{- end }} +{{- end }} +--- +{{- end -}} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/service-headless.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/service-headless.yaml new file mode 100644 index 0000000..0c22ec9 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-headless + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: headless +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + selector: + {{- include "keycloak.selectorLabels" . | nindent 4 }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/service-http.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/service-http.yaml new file mode 100644 index 0000000..c4a1dc9 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/service-http.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-http + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.service.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: http +spec: + type: {{ .Values.service.type }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + protocol: TCP + - name: https + port: {{ .Values.service.httpsPort }} + targetPort: https + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpsNodePort }} + nodePort: {{ .Values.service.httpsNodePort }} + {{- end }} + protocol: TCP + - name: http-management + port: {{ .Values.service.httpManagementPort }} + targetPort: http-management + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpManagementNodePort }} + nodePort: {{ .Values.service.httpManagementNodePort }} + {{- end }} + protocol: TCP + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "keycloak.selectorLabels" . 
| nindent 4 }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/serviceaccount.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..1d8f3f0 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "keycloak.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceAccount.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +imagePullSecrets: + {{- toYaml .Values.serviceAccount.imagePullSecrets | nindent 4 }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/servicemonitor.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..ba97f62 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- range $key, $serviceMonitor := dict "wildfly" .Values.serviceMonitor "extra" .Values.extraServiceMonitor }} +{{- with $serviceMonitor }} +{{- if .enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $key }} + {{- with .namespace }} + namespace: {{ . }} + {{- end }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + {{- with .namespaceSelector }} + namespaceSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "keycloak.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/component: http + endpoints: + - port: {{ .port }} + path: {{ .path }} + interval: {{ .interval }} + scrapeTimeout: {{ .scrapeTimeout }} +{{- end }} +{{- end }} +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/statefulset.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..8278986 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/statefulset.yaml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with .Values.statefulsetAnnotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.statefulsetLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + serviceName: {{ include "keycloak.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config-startup: {{ include (print .Template.BasePath "/configmap-startup.yaml") . | sha256sum }} + checksum/secrets: {{ tpl (toYaml .Values.secrets) . 
| sha256sum }} + {{- range $key, $value := .Values.podAnnotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + labels: + {{- include "keycloak.selectorLabels" . | nindent 8 }} + {{- if and .Values.postgresql.enabled (and .Values.postgresql.networkPolicy .Values.postgresql.networkPolicy.enabled) }} + {{ include "keycloak.postgresql.fullname" . }}-client: "true" + {{- end }} + {{- range $key, $value := .Values.podLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + spec: + {{- if or .Values.postgresql.enabled .Values.extraInitContainers }} + initContainers: + {{- if .Values.postgresql.enabled }} + - name: pgchecker + image: "{{ .Values.pgchecker.image.repository }}:{{ .Values.pgchecker.image.tag }}" + imagePullPolicy: {{ .Values.pgchecker.image.pullPolicy }} + securityContext: + {{- toYaml .Values.pgchecker.securityContext | nindent 12 }} + command: + - sh + - -c + - | + echo 'Waiting for PostgreSQL to become ready...' + + until printf "." && nc -z -w 2 {{ include "keycloak.postgresql.fullname" . }} {{ .Values.postgresql.service.port }}; do + sleep 2; + done; + + echo 'PostgreSQL OK ✓' + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + resources: + {{- toYaml .Values.pgchecker.resources | nindent 12 }} + {{- end }} + {{- with .Values.extraInitContainers }} + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: keycloak + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.command | nindent 12 }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- with .Values.lifecycleHooks }} + {{- tpl . 
$ | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_USER + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_USERNAME + - name: KEYCLOAK_PASSWORD + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_PASSWORD + {{- if .Values.postgresql.enabled }} + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: {{ include "keycloak.postgresql.fullname" . }} + - name: DB_PORT + value: {{ .Values.postgresql.service.port | quote }} + - name: DB_DATABASE + value: {{ .Values.postgresql.postgresqlDatabase | quote }} + - name: DB_USER + value: {{ .Values.postgresql.postgresqlUsername | quote }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.postgresql.fullname" . }} + key: postgresql-password + {{- end }} + {{- with .Values.extraEnv }} + {{- tpl . $ | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.extraEnvFrom }} + {{- tpl . $ | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: https + containerPort: 8443 + protocol: TCP + - name: http-management + containerPort: 9990 + protocol: TCP + {{- with .Values.extraPorts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + {{- range $key, $value := .Values.startupScripts }} + - name: startup + mountPath: "/opt/jboss/startup-scripts/{{ $key }}" + subPath: "{{ $key }}" + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- tpl . 
$ | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "keycloak.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + restartPolicy: {{ .Values.restartPolicy }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: themes-upper-directory + hostPath: + path: /root/oci/infra-set/keycloak/keycloak_theme/ + type: DirectoryOrCreate + {{- with .Values.startupScripts }} + - name: startup + configMap: + name: {{ include "keycloak.fullname" $ }}-startup + defaultMode: 0555 + items: + {{- range $key, $value := . }} + - key: {{ $key }} + path: {{ $key }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- tpl . $ | nindent 8 }} + {{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/test/configmap-test.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/test/configmap-test.yaml new file mode 100644 index 0000000..8dda781 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/test/configmap-test.yaml @@ -0,0 +1,50 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: test + helm.sh/hook-delete-policy: hook-succeeded +data: + test.py: | + import os + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions + from urllib.parse import urlparse + + print('Creating PhantomJS driver...') + driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log') + + base_url = 'http://{{ include "keycloak.fullname" . }}-http{{ if ne 80 (int .Values.service.httpPort) }}:{{ .Values.service.httpPort }}{{ end }}' + + print('Opening Keycloak...') + driver.get('{0}/auth/admin/'.format(base_url)) + + username = os.environ['KEYCLOAK_USER'] + password = os.environ['KEYCLOAK_PASSWORD'] + + username_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "username"))) + password_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "password"))) + login_button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "kc-login"))) + + print('Entering username...') + username_input.send_keys(username) + + print('Entering password...') + password_input.send_keys(password) + + print('Clicking login button...') + login_button.click() + + WebDriverWait(driver, 30).until(lambda driver: '/auth/admin/master/console/' in driver.current_url) + + print('Admin console visible. Login successful.') + + driver.quit() + + {{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/templates/test/pod-test.yaml b/ansible/01_old/roles/test/files/04-keycloak/templates/test/pod-test.yaml new file mode 100644 index 0000000..5b166f2 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/templates/test/pod-test.yaml @@ -0,0 +1,43 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "keycloak.fullname" . 
}}-test + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: test + annotations: + helm.sh/hook: test +spec: + securityContext: + {{- toYaml .Values.test.podSecurityContext | nindent 4 }} + containers: + - name: keycloak-test + image: "{{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}" + imagePullPolicy: {{ .Values.test.image.pullPolicy }} + securityContext: + {{- toYaml .Values.test.securityContext | nindent 8 }} + command: + - python3 + args: + - /tests/test.py + env: + - name: KEYCLOAK_USER + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: user + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: password + volumeMounts: + - name: tests + mountPath: /tests + volumes: + - name: tests + configMap: + name: {{ include "keycloak.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/ansible/01_old/roles/test/files/04-keycloak/values.schema.json b/ansible/01_old/roles/test/files/04-keycloak/values.schema.json new file mode 100644 index 0000000..47c2aa3 --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/values.schema.json @@ -0,0 +1,434 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "required": [ + "image" + ], + "definitions": { + "image": { + "type": "object", + "required": [ + "repository", + "tag" + ], + "properties": { + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + } + }, + "properties": { + "affinity": { + "type": "string" + }, + "args": { + "type": "array" + }, + "clusterDomain": { + "type": "string" + }, + "command": { + "type": "array" + }, + "enableServiceLinks": { + "type": "boolean" + 
}, + "extraContainers": { + "type": "string" + }, + "extraEnv": { + "type": "string" + }, + "extraEnvFrom": { + "type": "string" + }, + "extraInitContainers": { + "type": "string" + }, + "extraPorts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "string" + }, + "extraVolumes": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "hostAliases": { + "type": "array" + }, + "image": { + "$ref": "#/definitions/image" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "servicePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "items": { + "type": "string" + } + }, + "secretName": { + "type": "string" + } + } + } + } + } + }, + "lifecycleHooks": { + "type": "string" + }, + "livenessProbe": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "pgchecker": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/image" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "type": "object" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object" + 
}, + "podLabels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "podSecurityContext": { + "type": "object" + }, + "postgresql": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheusRule": { + "type": "object" + }, + "serviceMonitor": { + "type": "object" + }, + "extraServiceMonitor": { + "type": "object" + }, + "readinessProbe": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "restartPolicy": { + "type": "string" + }, + "route": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "path": { + "type": "string" + }, + "tls": { + "type": "object" + } + } + }, + "secrets": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "extraPorts": { + "type": "array" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "httpNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpPort": { + "type": "integer" + }, + "httpsNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpsPort": { + "type": "integer" + }, + "httpManagementNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpManagementPort": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "type": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "sessionAffinity": { + "type": "string" + }, + "sessionAffinityConfig": { + "type": "object" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": 
"boolean" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "labels": { + "type": "object" + }, + "name": { + "type": "string" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "rules": { + "type": "array" + } + } + }, + "startupScripts": { + "type": "object" + }, + "statefulsetAnnotations": { + "type": "object" + }, + "statefulsetLabels": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "metrics": { + "type": "array" + }, + "behavior": { + "type": "object" + } + } + }, + "test": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "image": { + "$ref": "#/definitions/image" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array" + } + } + } +} diff --git a/ansible/01_old/roles/test/files/04-keycloak/values.yaml b/ansible/01_old/roles/test/files/04-keycloak/values.yaml new file mode 100644 index 0000000..a95521f --- /dev/null +++ b/ansible/01_old/roles/test/files/04-keycloak/values.yaml @@ -0,0 +1,552 @@ +# Optionally override the fully qualified name +fullnameOverride: "imxc-keycloak" + +# Optionally override the name +nameOverride: "" + +# The number of replicas to create (has no effect if autoscaling enabled) +replicas: 2 + +image: + # The Keycloak image repository + #repository: cdm-dev.exem-oss.org/keycloak/keycloak + repository: 10.10.31.243:5000/cmoa3/keycloak + # Overrides the Keycloak image tag whose default is the chart version + tag: "11.0.1" + # The Keycloak image pull policy + pullPolicy: Always + +# Image pull secrets for the Pod +#imagePullSecrets: [] +# - name: myRegistrKeySecretName 
+imagePullSecrets: + - name: regcred + +# Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files +hostAliases: [] +# - ip: "1.2.3.4" +# hostnames: +# - "my.host.com" + +# Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links +enableServiceLinks: true + +# Pod management policy. One of `Parallel` or `OrderedReady` +podManagementPolicy: Parallel + +# Pod restart policy. One of `Always`, `OnFailure`, or `Never` +restartPolicy: Always + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + # Additional annotations for the ServiceAccount + annotations: {} + # Additional labels for the ServiceAccount + labels: {} + # Image pull secrets that are attached to the ServiceAccount + #imagePullSecrets: [] + imagePullSecrets: + - name: regcred + +rbac: + create: true + rules: + # RBAC rules for KUBE_PING + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + +# SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) +podSecurityContext: + fsGroup: 1000 + +# SecurityContext for the Keycloak container +securityContext: + runAsUser: 1000 + runAsNonRoot: true + +# Additional init containers, e. g. for providing custom themes +extraInitContainers: | + - name: theme-provider + image: 10.10.31.243:5000/cmoa3/theme-provider:latest + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme ..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +#extraInitContainers: "" + +# Additional sidecar containers, e. 
g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: "" + +# Lifecycle hooks for the Keycloak container +lifecycleHooks: | +# postStart: +# exec: +# command: +# - /bin/sh +# - -c +# - ls + +# Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance +terminationGracePeriodSeconds: 60 + +# The internal Kubernetes cluster domain +clusterDomain: cluster.local + +## Overrides the default entrypoint of the Keycloak container +command: [] + +## Overrides the default args for the Keycloak container +#args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled", "-Dkeycloak.profile.feature.admin_fine_grained_authz=enabled"] +args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled"] + +# Additional environment variables for Keycloak +extraEnv: | + # HA settings + - name: PROXY_ADDRESS_FORWARDING + value: "true" + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + # postgresql settings + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: keycloak + - name: DB_USER + value: admin + - name: DB_PASSWORD + value: eorbahrhkswp +# - name: KEYCLOAK_USER +# value: keycloak +# - name: KEYCLOAK_PASSWORD +# value: keycloak +#extraEnv: "" + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS_COUNT + # value: "2" + # - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + # value: "2" +#extraEnv: | +# - name: JGROUPS_DISCOVERY_PROTOCOL +# value: dns.DNS_PING +# - name: JGROUPS_DISCOVERY_PROPERTIES +# value: 'dns_query={{ include 
"keycloak.serviceDnsName" . }}' +# - name: CACHE_OWNERS_COUNT +# value: "2" +# - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT +# value: "2" +# Additional environment variables for Keycloak mapped from Secret or ConfigMap +extraEnvFrom: "" + +# Pod priority class name +#priorityClassName: "manual" + +# Pod affinity +affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 12 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + +#affinity: {} + +# Node labels for Pod assignment +nodeSelector: {} + +# Node taints to tolerate +tolerations: [] + +# Additional Pod labels +podLabels: {} + +# Additional Pod annotations +podAnnotations: {} + +# Liveness probe configuration +livenessProbe: | + httpGet: + path: /auth/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +# Readiness probe configuration +readinessProbe: | + httpGet: + path: /auth/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + +# Pod resource requests and limits +#resources: {} + # requests: + # cpu: "500m" + # memory: "1024Mi" + # limits: + # cpu: "500m" + # memory: "1024Mi" +resources: + requests: + memory: "200Mi" + cpu: "10m" + +# Startup scripts to run before Keycloak starts up +startupScripts: + # WildFly CLI script for configuring the node-identifier + keycloak.cli: | + {{- .Files.Get "scripts/keycloak.cli" }} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + +# Add additional volumes, e. g. 
for custom themes +extraVolumes: | + - name: theme + emptyDir: {} +#extraVolumes: "" + +# Add additional volumes mounts, e. g. for custom themes +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes +#extraVolumeMounts: "" + +# Add additional ports, e. g. for admin console or exposing JGroups ports +extraPorts: [] + +# Pod disruption budget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +# Annotations for the StatefulSet +statefulsetAnnotations: {} + +# Additional labels for the StatefulSet +statefulsetLabels: {} + +# Configuration for secrets that should be created +secrets: {} + # mysecret: + # type: {} + # annotations: {} + # labels: {} + # stringData: {} + # data: {} + +service: + # Annotations for headless and HTTP Services + annotations: {} + # Additional labels for headless and HTTP Services + labels: {} + # key: value + # The Service type + type: NodePort + # Optional IP for the load balancer. Used for services of type LoadBalancer only + loadBalancerIP: "" + # The http Service port + httpPort: 80 + # The HTTP Service node port if type is NodePort + httpNodePort: 31082 + # The HTTPS Service port + httpsPort: 8443 + # The HTTPS Service node port if type is NodePort + httpsNodePort: null + # The WildFly management Service port + httpManagementPort: 9990 + # The WildFly management Service node port if type is NodePort + httpManagementNodePort: 31990 + # Additional Service ports, e. g. for custom admin console + extraPorts: [] + # When using Service type LoadBalancer, you can restrict source ranges allowed + # to connect to the LoadBalancer, e. g. 
will result in Security Groups + # (or equivalent) with inbound source ranges allowed to connect + loadBalancerSourceRanges: [] + # Session affinity + # See https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace + sessionAffinity: "" + # Session affinity config + sessionAffinityConfig: {} + +ingress: + # If `true`, an Ingress is created + enabled: false + # The Service port targeted by the Ingress + servicePort: http + # Ingress annotations + annotations: {} + ## Resolve HTTP 502 error using ingress-nginx: + ## See https://www.ibm.com/support/pages/502-error-ingress-keycloak-response + # nginx.ingress.kubernetes.io/proxy-buffer-size: 128k + + # Additional Ingress labels + labels: {} + # List of rules for the Ingress + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - / + # TLS configuration + tls: + - hosts: + - keycloak.example.com + secretName: "" + + # ingress for console only (/auth/admin) + console: + # If `true`, an Ingress is created for console path only + enabled: false + # Ingress annotations for console ingress only + # Useful to set nginx.ingress.kubernetes.io/whitelist-source-range particularly + annotations: {} + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - /auth/admin/ + +## Network policy configuration +networkPolicy: + # If true, the Network policies are deployed + enabled: false + + # Additional Network policy labels + labels: {} + + # Define all other external allowed source + # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#networkpolicypeer-v1-networking-k8s-io + extraFrom: [] + +route: + # If `true`, an OpenShift Route is created + enabled: false + # Path for the Route + path: / + # Route annotations + annotations: {} + # Additional Route labels + labels: {} + # Host name for the Route + host: "" + # TLS configuration + tls: + # If `true`, TLS is enabled 
for the Route + enabled: false + # Insecure edge termination policy of the Route. Can be `None`, `Redirect`, or `Allow` + insecureEdgeTerminationPolicy: Redirect + # TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` + termination: edge + +pgchecker: + image: + # Docker image used to check Postgresql readiness at startup + #repository: cdm-dev.exem-oss.org/keycloak/busybox + #repository: {{ .Values.global.IMXC_REGISTRY }}/keycloak/busybox + repository: 10.10.31.243:5000/cmoa3/busybox + # Image tag for the pgchecker image + tag: 1.32 + # Image pull policy for the pgchecker image + pullPolicy: Always + # SecurityContext for the pgchecker contai/docker.ner + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + # Resource requests and limits for the pgchecker container + resources: + requests: + cpu: "10m" + memory: "16Mi" + limits: + cpu: "10m" + memory: "16Mi" + +postgresql: + # If `true`, the Postgresql dependency is enabled + enabled: false + # PostgreSQL User to create + postgresqlUsername: keycloak + # PostgreSQL Password for the new user + postgresqlPassword: keycloak + # PostgreSQL Database to create + postgresqlDatabase: keycloak + # PostgreSQL network policy configuration + networkPolicy: + enabled: false + +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /metrics + # The Service port at which metrics are served + port: http-management + +extraServiceMonitor: + # If 
`true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /auth/realms/master/metrics + # The Service port at which metrics are served + port: http + +prometheusRule: + # If `true`, a PrometheusRule resource for the prometheus-operator is created + enabled: false + # Annotations for the PrometheusRule + annotations: {} + # Additional labels for the PrometheusRule + labels: {} + # List of rules for Prometheus + rules: [] + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
+ # expr: | + # ( + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m] + # ) + # ) + # / + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m] + # ) + # ) + # ) * 100 > 1 + # for: 5m + # labels: + # severity: warning + +autoscaling: + # If `true`, a autoscaling/v2beta2 HorizontalPodAutoscaler resource is created (requires Kubernetes 1.18 or above) + # Autoscaling seems to be most reliable when using KUBE_PING service discovery (see README for details) + # This disables the `replicas` field in the StatefulSet + enabled: false + # Additional HorizontalPodAutoscaler labels + labels: {} + # The minimum and maximum number of replicas for the Keycloak StatefulSet + minReplicas: 3 + maxReplicas: 10 + # The metrics to use for scaling + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + # The scaling policy to use. This will scale up quickly but only scale down a single Pod per 5 minutes. + # This is important because caches are usually only replicated to 2 Pods and if one of those Pods is terminated this will give the cluster time to recover. 
+ behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 300 + +test: + # If `true`, test resources are created + enabled: false + image: + # The image for the test Pod + #repository: docker.io/unguiculus/docker-python3-phantomjs-selenium + repository: 10.10.31.243:5000/docker-python3-phantomjs-selenium + # The tag for the test Pod image + tag: v1 + # The image pull policy for the test Pod image + pullPolicy: IfNotPresent + # SecurityContext for the entire test Pod + podSecurityContext: + fsGroup: 1000 + # SecurityContext for the test container + securityContext: + runAsUser: 1000 + runAsNonRoot: true + diff --git a/ansible/01_old/roles/test/files/05-imxc/Chart.yaml b/ansible/01_old/roles/test/files/05-imxc/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/05-imxc/cmoa-manual.yaml b/ansible/01_old/roles/test/files/05-imxc/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/test/files/05-imxc/scripts/init-api-server.sh 
b/ansible/01_old/roles/test/files/05-imxc/scripts/init-api-server.sh new file mode 100644 index 0000000..78a9962 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/scripts/init-api-server.sh @@ -0,0 +1,17 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + /sbin/tini -- java -Djava.security.egd=file:/dev/./urandom -jar /app.jar + #java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/test/files/05-imxc/scripts/init-auth-server.sh b/ansible/01_old/roles/test/files/05-imxc/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/ansible/01_old/roles/test/files/05-imxc/scripts/init-noti-server.sh b/ansible/01_old/roles/test/files/05-imxc/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/05-imxc/scripts/init-resource.sh b/ansible/01_old/roles/test/files/05-imxc/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/05-imxc/scripts/init.json b/ansible/01_old/roles/test/files/05-imxc/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + 
"offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + 
"clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + 
"description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + 
"clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": 
"8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not 
specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + 
"authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + 
"consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + 
"offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": 
"security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": 
"false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + 
"id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": 
"6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + 
"name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": 
"true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + 
"user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; 
includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": 
"b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + 
"userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": 
"667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/auth-server.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/auth-server.yaml new file mode 100644 index 0000000..fb8fe7b --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/auth-server.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-server + namespace: imxc +spec: + selector: + matchLabels: + app: auth + replicas: 1 + template: + metadata: + labels: + app: auth + spec: + initContainers: + - name: init-resource + image: {{ .Values.global.IMXC_IN_REGISTRY }}/init-resource:latest + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ['chmod -R 777 /scripts; cp /scripts/init.json /tmp/init.json'] + volumeMounts: + - name: init + mountPath: /tmp + containers: + - name: auth-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/auth-server:{{ .Values.global.AUTH_SERVER_VERSION 
}} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-auth-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # imxc-api-server configuration + - name: IMXC_API-SERVER-URL + value: http://imxc-api-service:8080 + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_REPO + value: debug + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_AUTH_AUTHENTICATION_USER_SERVICE + value: debug + # 현대카드는 커스텀으로 해당 값 추가. keycloak만 사용(true), keycloak+내부db 사용(false) + - name: IMXC_KEYCLOAK_ENABLED + value: "true" + + volumeMounts: + - name: init + mountPath: /tmp + resources: + requests: + memory: "200Mi" + cpu: "10m" + + volumes: + - name: init + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: auth-server-service + namespace: imxc +spec: + type: ClusterIP + selector: + app: auth + ports: + - protocol: TCP + port: 8480 + # nodePort: 15016 diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-datagate.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-datagate.yaml new file mode 100644 index 0000000..cbbee9a --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-datagate.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + selector: + matchLabels: + app: datagate + replicas: 2 + template: + metadata: + labels: + app: datagate + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/datagate:{{ .Values.global.DATAGATE_VERSION }} + imagePullPolicy: IfNotPresent + name: datagate + ports: + - containerPort: 50051 + protocol: TCP + - 
containerPort: 14268 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! + - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: kafka-broker:9094 + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "2000m" + memory: "1Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + ports: + - name: datagate-grpc + port: 50051 + protocol: TCP + targetPort: 50051 + nodePort: 30051 + - name: datagate-http + port: 14268 + targetPort: 14268 +# nodePort: 31268 + - name: datagate-readiness + port: 14269 + targetPort: 14269 + selector: + app: datagate + type: NodePort diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-agent.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-agent.yaml new file mode 100644 index 0000000..45c3d41 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-agent.yaml @@ -0,0 +1,331 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + selector: + matchLabels: + app: metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: metric-agent + spec: + containers: + - name: metric-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-agent:{{ .Values.global.METRIC_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14271 + - containerPort: 14272 + args: + - --config.file=/etc/metric-agent/metric-agent.yml + env: + - name: STORAGE_TYPE + value: datagate + - name: DATAGATE + value: datagate:50051 + - name: CLUSTER_ID + value: cloudmoa +# - name: USER_ID +# value: 
mskim@ex-em.com + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "300m" + volumes: + - name: config-volume + configMap: + name: metric-agent-config + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + ports: + - name: metric + port: 14271 + targetPort: 14271 + selector: + app: metric-agent + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metric-agent-config + namespace: imxc +data: + metric-agent.yml: | + global: + scrape_interval: 10s + evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. + + scrape_configs: + - job_name: 'kubernetes-kubelet' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 'cloudmoa' + - target_label: xm_entity_type + replacement: 'Node' + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + + - job_name: 'kubernetes-node-exporter' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - 
source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: 'kubernetes-(.*)' + replacement: '${1}' + target_label: name + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Node' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: 'kubernetes-cadvisor' + scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Container' + +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + + {{- else }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +{{- end }} + # CLOUD-8671 | 데이터 필터링 설정 추가 + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop + + - job_name: 'kafka-consumer' + metrics_path: /remote_prom + scrape_interval: 5s + scrape_timeout: 5s + scheme: kafka + static_configs: + - targets: ['kafka-broker:9094'] + params: + #server_addrs: ['broker.default.svc.k8s:9094'] + server_addrs: ['kafka-broker:9094'] + encoding: [proto3] + contents: [remote_write] + compression: [snappy] + group: [remote-write-consumer] + workers: [50] + + # job for API server (SpringBoot) commented by ersione 2019-09-19 + - job_name: 'imxc-api' + metrics_path: '/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: ['imxc-api-service:8080'] + - job_name: 'imxc-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] + #- job_name: 'imxc-auth' + # metrics_path: '/actuator/prometheus' + # scrape_interval: 15s + # static_configs: + # - targets: ['auth-server-service:8480'] + + + + - job_name: 'alertmanager-exporter' + metrics_path: '/metrics' + scrape_interval: 5s + static_configs: + - targets: ['alertmanager:9093'] + + + # modified by seungtak choi 2020-02-18 + - job_name: 'cmoa-collector' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + 
namespaces: + names: + - imxc + relabel_configs: + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: cmoa-collector + + # added by dwkim 2021-03-15 + - job_name: 'elasticsearch' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + relabel_configs: + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_pod_node_name] + target_label: xm_node_id + - source_labels: [__meta_kubernetes_namespace] + target_label: xm_namespace + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: es-exporter-elasticsearch-exporter + + # kafka-exporter prometheus 수집 룰 추가 + - job_name: 'kafka-exporter' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9308' + + # kafka-jmx-exporter configuration yaml 수집룰 추가 + - job_name: 'kafka-jmx' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9010' + + # job for API Server(Spring Cloud Notification Server) commented by hjyoon 2022-01-26 + - job_name: 'cmoa-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-collector.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-collector.yaml new file mode 100644 index 0000000..3d7acc8 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/05-imxc/templates/cloudmoa-metric-collector.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + selector: + matchLabels: + app: metric-collector + replicas: 3 + template: + metadata: + labels: + app: metric-collector + spec: + containers: + - name: metric-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-collector:{{ .Values.global.METRIC_COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14270 + env: + - name: KAFKA_CONSUMER_BROKERS + value: kafka-broker:9094 + - name: HTTP_PUSH + value: http://base-cortex-nginx/api/v1/push + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + ports: + - name: metric + port: 14270 + targetPort: 14270 + selector: + app: metric-collector diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-batch.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-batch.yaml new file mode 100644 index 0000000..b20fed2 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-batch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-batch + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-batch +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-batch + template: + metadata: + labels: + app: cmoa-kube-info-batch + spec: + containers: + - name: cmoa-kube-info-batch + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-batch:{{ .Values.global.KUBE_INFO_BATCH_VERSION }} + imagePullPolicy: Always + env: + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: 
JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: DELETE_HOUR + value: '{{ .Values.global.DELETE_HOUR }}' diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-connector.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-connector.yaml new file mode 100644 index 0000000..cad91b9 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-connector.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-connector + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-connector +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-connector + template: + metadata: + labels: + app: cmoa-kube-info-connector + spec: + containers: + - name: cmoa-kube-info-connector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-connector:{{ .Values.global.KUBE_INFO_CONNECTOR_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_GROUP_ID + value: cmoa-kube-info-connector + - name: KAFKA_SERVER + value: kafka:9092 + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: MAX_POLL_RECORDS_CONFIG + value: "300" + - name: MAX_POLL_INTERVAL_MS_CONFIG + value: "600000" + - name: SESSION_TIMEOUT_MS_CONFIG + value: "60000" + - name: MAX_PARTITION_FETCH_BYTES_CONFIG + value: "5242880" diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-flat.yaml 
b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-flat.yaml new file mode 100644 index 0000000..6f77ee5 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-kube-info-flat.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-flat + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-flat +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-flat + template: + metadata: + labels: + app: cmoa-kube-info-flat + spec: + containers: + - name: cmoa-kube-info-flat + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-flat:{{ .Values.global.KUBE_INFO_FLAT_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_SERVER + value: kafka:9092 + - name: KAFKA_INPUT_TOPIC + value: {{ .Values.global.KAFKA_INPUT_TOPIC }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + resources: + limits: + memory: 1Gi + requests: + memory: 200Mi diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-manual.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git 
a/ansible/01_old/roles/test/files/05-imxc/templates/eureka-server.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/eureka-server.yaml new file mode 100644 index 0000000..5ffd9c2 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/eureka-server.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Service +metadata: + name: eureka + namespace: imxc + labels: + app: eureka +spec: + type: NodePort + ports: + - port: 8761 + targetPort: 8761 + nodePort: 30030 + name: eureka + selector: + app: eureka +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: eureka + namespace: imxc +spec: + serviceName: 'eureka' + replicas: 3 + selector: + matchLabels: + app: eureka + template: + metadata: + labels: + app: eureka + spec: + containers: + - name: eureka + image: {{ .Values.global.IMXC_IN_REGISTRY }}/eureka-server:{{ .Values.global.EUREKA_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8761 + #resources: + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "1200Mi" + # cpu: "500m" + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/ + - name: JVM_OPTS + value: "-Xms1g -Xmx1g" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "20m" diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/imxc-api-server.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/imxc-api-server.yaml new file mode 100644 index 0000000..de967a6 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/imxc-api-server.yaml @@ -0,0 +1,245 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-api-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-api + ports: + - protocol: TCP + name: api + port: 8080 + targetPort: 8080 + nodePort: 32080 + - protocol: TCP 
+ name: netty + port: 10100 + targetPort: 10100 + nodePort: 31100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-api + namespace: imxc + labels: + app: imxc-api +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-api + template: + metadata: + labels: + app: imxc-api + build: develop + spec: + securityContext: + #runAsNonRoot: true + runAsUser: 1577 + initContainers: + - name: cloudmoa-api-permission-fix + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 +# - sh +# - -c +# - "chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log" + volumeMounts: + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + containers: + - name: imxc-api + image: {{ .Values.global.IMXC_IN_REGISTRY }}/api-server:{{ .Values.global.API_SERVER_VERSION }} + resources: + requests: + cpu: 200m + memory: 500Mi + limits: + cpu: 2000m + memory: 5000Mi + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-api-server.sh" | quote }}] + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + - name: SPRING_DATAGATE_URLS + value: "{{ .Values.global.DATAGATE_INSIDE_IP }}" + - name: SPRING_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_INSIDE_PORT }}" + - name: SPRING_REDIS_URLS + value: {{ .Values.global.REDIS_URLS }} + - name: SPRING_REDIS_PORT + value: "{{ .Values.global.REDIS_PORT }}" + - name: SPRING_REDIS_PASSWORD + value: {{ .Values.global.REDIS_PASSWORD }} + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + - name: SPRING_BOOT_ADMIN_CLIENT_URL + value: http://{{ .Values.global.IMXC_ADMIN_SERVER_DNS }}:8888 + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_NAME + value: Intermax 
Cloud API Server + - name: SPRING_BOOT_ADMIN_CLIENT_ENABLED + value: "false" + - name: OPENTRACING_JAEGER_ENABLED + value: "false" + - name: SPRING_JPA_PROPERTIES_HIBERNATE_GENERATE_STATISTICS + value: "false" + - name: IMXC_REPORT_ENABLED + value: "true" + - name: IMXC_ALERT_PERSIST + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_ENVIRONMENT + value: Demo + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_PREFERIP + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_PODNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: SPRING_BOOT_ADMIN_CLIENT_AUTODEREGISTRATION + value: "true" + - name: SPRING_JPA_HIBERNATE_DDL-AUTO + value: validate + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + - name: KEYCLOAK_RESOURCE + value: "{{ .Values.global.KEYCLOAK_RESOURCE }}" + - name: SPRING_KEYCLOAK_MASTER_USERNAME + value: "{{ .Values.global.KEYCLOAK_MASTER_USERNAME }}" + - name: SPRING_KEYCLOAK_MASTER_PASSWORD + value: "{{ .Values.global.KEYCLOAK_MASTER_PASSWORD }}" + - name: SPRING_LDAP_USE + value: "{{ .Values.global.IMXC_LDAP_USE }}" + - name: TIMEZONE + value: Asia/Seoul + - name: IMXC_PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: IMXC_PROMETHEUS_NAMESPACE + value: "imxc" + - name: LOGGING_LEVEL_ROOT + value: info + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + #R30020210730 추가 :: 
현대카드는 true로 설정 + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-HOST + value: "exemmail1.ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PORT + value: "587" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-USERNAME + value: "imxc@ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PASSWORD + value: "1234" + - name: IMXC_ALERT_NOTIFICATION_MAIL_PROTOCOL + value: "smtp" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-REQ + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-ENB + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_SMTP-AUTH + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_DEBUG + value: "true" + - name: IMXC_ANOMALY_BLACK-LIST + value: "false" + - name: IMXC_VERSION_SAAS + value: "false" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_API_SERVER_KUBERNETES_SERVICE + value: info + - name: IMXC_WEBSOCKET_SCHEDULE_PERIOD_5SECOND + value: "30000" + - name: IMXC_CACHE_INFO_1MCACHE + value: "0 0/1 * * * ?" + - name: IMXC_EXECUTION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_PERMISSION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_CODE-LOG_USE + value: "false" + - name: IMXC_PORTAL_INFO_URL + value: "{{ .Values.global.IMXC_PORTAL_INFO_URL }}" + # Do not remove below rows related to AGENT-INSTALL. Added by youngmin 2021-03-29. 
+ - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_IP + value: {{ .Values.global.KAFKA_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_INTERFACE-PORT + value: "{{ .Values.global.KAFKA_INTERFACE_PORT }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_IP + value: {{ .Values.global.IMXC_API_SERVER_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_NETTY-PORT + value: "{{ .Values.global.APISERVER_NETTY_PORT }}" + - name: AGENT-INSTALL_REGISTRY_URL + value: {{ .Values.global.IMXC_IN_REGISTRY }} + - name: AGENT-INSTALL_IMAGE_TAG + value: {{ .Values.global.AGENT_IMAGE_TAG }} + - name: AGENT-INSTALL_JAEGER_AGENT_CLUSTERIP + value: {{ .Values.global.JAEGER_AGENT_CLUSTERIP }} + - name: AGENT-INSTALL_JAEGER_JAVA-SPECIALAGENT-CLASSPATH + value: {{ .Values.global.JAEGER_JAVA_SPECIALAGENT_CLASSPATH }} + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_IP + value: "{{ .Values.global.DATAGATE_OUTSIDE_IP }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_OUTSIDE_PORT }}" + - name: IMXC_REST-CONFIG_MAX-CON + value: "200" + - name: IMXC_REST-CONFIG_MAX-CON-ROUTE + value: "65" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + # Elasticsearch for Security + - name: SPRING_ELASTIC_SSL_USERNAME + value: "{{ .Values.global.CMOA_ES_ID }}" + - name: SPRING_ELASTIC_SSL_PASSWORD + value: "{{ .Values.global.CMOA_ES_PW }}" + - name: IMXC_BACK-LOGIN_ENABLED + value: "{{ .Values.global.BACKLOGIN }}" + volumeMounts: + - mountPath: /var/log/imxc-audit.log + name: auditlog + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + volumes: + - name: auditlog + hostPath: + path: {{ .Values.global.AUDITLOG_PATH }}/imxc-audit.log + type: FileOrCreate + - name: notification-upper-directory + hostPath: + path: /home/ + type: DirectoryOrCreate + - name: notification-directory + hostPath: + path: 
/home/cloudmoa_event.log + type: FileOrCreate diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/imxc-collector.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/imxc-collector.yaml new file mode 100644 index 0000000..e125243 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/imxc-collector.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-collector + template: + metadata: + labels: + app: cmoa-collector + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: cmoa-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cmoa-collector:{{ .Values.global.COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 500m + memory: 2500Mi + ports: + - containerPort: 12010 + env: + - name: LOCATION + value: Asia/Seoul + - name: KAFKA_SERVER + value: kafka:9092 + - name: ELASTICSEARCH + value: elasticsearch:9200 +# - name: PROMETHEUS +# value: nginx-cortex/prometheus + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! 
+ - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: CMOA_ES_ID + value: {{ .Values.global.CMOA_ES_ID }} + - name: CMOA_ES_PW + value: {{ .Values.global.CMOA_ES_PW }} + resources: + requests: + cpu: "300m" + memory: "1500Mi" + limits: + cpu: "500m" + memory: "2500Mi" +- apiVersion: v1 + kind: Service + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + ports: + - name: cmoa-collector-exporter + port: 12010 + targetPort: 12010 + selector: + app: cmoa-collector + diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/noti-server.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/noti-server.yaml new file mode 100644 index 0000000..99c7a5b --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/noti-server.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: noti-server + namespace: imxc +spec: + selector: + matchLabels: + app: noti + replicas: 1 + template: + metadata: + labels: + app: noti + spec: + containers: + - name: noti-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/notification-server:{{ .Values.global.NOTI_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-noti-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: {{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }} + - name: KEYCLOAK_REALM + value: exem + + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + + # postgres configuration + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + + # redis configuration + - name: SPRING_REDIS_HOST + value: redis-master + - name: SPRING_REDIS_PORT + value: "6379" + - name: SPRING_REDIS_PASSWORD + value: dkagh1234! 
+ + # elasticsearch configuration + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + + # file I/O configuration + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + + # rabbitmq configuration + - name: IMXC_RABBITMQ_HOST + value: base-rabbitmq + - name: IMXC_RABBITMQ_PORT + value: "61613" + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: IMXC_RABBITMQ_SYSTEM_ID + value: "user" + - name: IMXC_RABBITMQ_SYSTEM_PASSWORD + value: "eorbahrhkswp" + + # api-server configuration + - name: IMXC_API-SERVER-URL + value: "http://imxc-api-service:8080" + + # cortex integration + - name: SPRING_CORTEX_URLS + value: base-cortex-configs + - name: SPRING_CORTEX_PORT + value: "8080" + + # alert webhook + - name: IMXC_ALERT_WEBHOOK_URLS + value: http://noti-server-service:8080/alert + + # etc configuration + - name: IMXC_PROMETHEUS_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + - name: IMXC_ALERT_KUBERNETES_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: noti-server-service + namespace: imxc +spec: + type: NodePort + selector: + app: noti + ports: + - protocol: TCP + port: 8080 + nodePort: 31083 diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/streams-depl.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/streams-depl.yaml new file mode 100644 index 0000000..b3223e5 --- /dev/null 
+++ b/ansible/01_old/roles/test/files/05-imxc/templates/streams-depl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-stream-txntrend-deployment + namespace: imxc + labels: + app: kafka-stream-txntrend +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-stream-txntrend + template: + metadata: + labels: + app: kafka-stream-txntrend + spec: + containers: + - name: kafka-stream-txntrend + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-stream-txntrend:{{ .Values.global.KAFKA_STREAM_VERSION }} + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_KAFKA_HOST + value: kafka-broker:9094 + - name: SERVICE_STREAM_OUTPUT + value: jspd_txntrend diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/topology-agent.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/topology-agent.yaml new file mode 100644 index 0000000..80476a3 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/topology-agent.yaml @@ -0,0 +1,107 @@ +{{ if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{ end }} +kind: ClusterRoleBinding +metadata: + name: topology-agent + namespace: imxc + labels: + k8s-app: topology-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: topology-agent + namespace: imxc +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: topology-agent + namespace: imxc + labels: + app: topology-agent +spec: + selector: + matchLabels: + app: topology-agent + template: + metadata: + labels: + app: topology-agent + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # below appended + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: topology-agent + image: {{ .Values.global.IMXC_IN_REGISTRY 
}}/topology-agent:{{ .Values.global.TOPOLOGY_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + env: + - name: CLUSTER_ID + value: cloudmoa + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATAGATE + value: datagate:50051 + - name: LOG_RNAME_USE + value: "false" + - name: LOG_LEVEL + value: "DEBUG" + - name: CLOUDMOA_SETTING_PATH + value: /home/cloudmoa/setting/ + resources: + requests: + memory: "125Mi" + cpu: "100m" + limits: + memory: "600Mi" + cpu: "500m" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / diff --git a/ansible/01_old/roles/test/files/05-imxc/templates/zuul-server.yaml b/ansible/01_old/roles/test/files/05-imxc/templates/zuul-server.yaml new file mode 100644 index 0000000..79969d7 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/templates/zuul-server.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-deployment + namespace: imxc + labels: + app: cloud +spec: + selector: + matchLabels: + app: cloud + replicas: 1 + template: + metadata: + labels: + app: cloud + spec: + containers: + - env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ZIPKIN_BASE-URL + value: http://zipkin-service:9411 + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_ZUULSERVER_FILTERS_AUTHFILTER + value: info + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + 
name: zuul + image: {{ .Values.global.IMXC_IN_REGISTRY }}/zuul-server:{{ .Values.global.ZUUL_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + #- containerPort: 6831 + #protocol: UDP + #resources: + # requests: + # memory: "256Mi" + # cpu: "344m" + # limits: + # memory: "1Gi" + # cpu: "700m" + resources: + requests: + memory: "200Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: zuul + namespace: imxc + labels: + app: cloud +spec: + type: NodePort + selector: + app: cloud + ports: + - port: 8080 + targetPort: 8080 + nodePort: 31081 diff --git a/ansible/01_old/roles/test/files/05-imxc/values.yaml b/ansible/01_old/roles/test/files/05-imxc/values.yaml new file mode 100644 index 0000000..07c9a47 --- /dev/null +++ b/ansible/01_old/roles/test/files/05-imxc/values.yaml @@ -0,0 +1,157 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + IMXC_LDAP_USE: false + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AUDITLOG_PATH: /var/log + KAFKA_IP: kafka-broker + # 로드밸런서 안 쓴다고 가정했을때 입니다.. + KAFKA_INTERFACE_PORT: 9094 + APISERVER_NETTY_PORT: 10100 + #REGISTRY_URL: cdm-dev.exem-oss.org:5050 + #REGISTRY_URL: 10.10.31.243:5000/cmoa + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AGENT_IMAGE_TAG: rel0.0.0 + # Jaeger 관련변수 + JAEGER_AGENT_CLUSTERIP: 10.98.94.198 + JAEGER_JAVA_SPECIALAGENT_CLASSPATH: classpath:/install/opentracing-specialagent-1.7.4.jar + # added by DongWoo Kim 2021-06-21 + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_MASTER_USERNAME: admin + KEYCLOAK_MASTER_PASSWORD: admin + IMXC_PORTAL_INFO_URL: + KEYCLOAK_REALM: exem + # added by EunHye Kim 2021-08-25 + #DATAGATE_URLS: datagate + #DATAGATE_IP: 111.111.111.111 + #DATAGATE_PORT: 14268 + DATAGATE_INSIDE_IP: datagate + DATAGATE_INSIDE_PORT: 14268 + DATAGATE_OUTSIDE_IP: 111.111.111.111 + DATAGATE_OUTSIDE_PORT: 30051 + REDIS_URLS: redis-master + REDIS_PORT: 6379 + REDIS_PASSWORD: dkagh1234! 
+ # added by DongWoo Kim 2021-08-31 (version of each module) + DATAGATE_VERSION: rel0.0.0 + #ADMIN_SERVER_VERSION: v1.0.0 + #API_SERVER_VERSION: CLOUD-172 + API_SERVER_VERSION: rel0.0.0 + COLLECTOR_VERSION: rel0.0.0 + #release-3.3.0 + TOPOLOGY_AGENT_VERSION: rel0.0.0 + METRIC_COLLECTOR_VERSION: rel0.0.0 + #v1.0.0 + METRIC_AGENT_VERSION: rel0.0.0 + # spring cloud + ZUUL_SERVER_VERSION: rel0.0.0 + #CMOA-1269 + EUREKA_SERVER_VERSION: rel0.0.0 + AUTH_SERVER_VERSION: rel0.0.0 + NOTI_SERVER_VERSION: rel0.0.0 + KAFKA_STREAM_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 + KUBE_INFO_FLAT_VERSION: rel0.0.0 + KUBE_INFO_BATCH_VERSION: rel0.0.0 + KUBE_INFO_CONNECTOR_VERSION: rel0.0.0 + + + CMOA_MANUAL_PORT: 31090 + + + # Keycloak + #KEYCLOAK_VERSION: v1.0.0 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + #IMXC_REGISTRY: 10.10.31.243:5000 + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + + # namespace 추가 + IMXC_NAMESPACE: imxc + + # ZUUL 8080으로 열어놓을것 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + JDBC_KIND: 'postgres' + JDBC_SERVER: 'postgres:5432' + JDBC_DB: 'postgresdb' + JDBC_USER: 'admin' + JDBC_PWD: 'eorbahrhkswp' + + KAFKA_INPUT_TOPIC: 'kubernetes_info' + + TABLE_PREFIX: 'cmoa_' + BLACK_LIST: 'configmap_base,cronjob_active,endpoint_base,endpoint_addresses,endpoint_notreadyaddresses,endpoint_ports,event_base,node_image,persistentvolume_base,persistentvolumeclaim_base,pod_volume,resourcequota_base,resourcequota_scopeselector' + DELETE_HOUR: '15' + BACKLOGIN: false diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml 
b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo 
"exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + 
"bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + 
"id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, 
+ { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": 
"view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not 
specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], 
+ "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + 
"fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": 
"S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = 
Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": 
"oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": 
"String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": 
"email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + 
"attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + 
"oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } 
+ } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + 
"autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is 
existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": 
false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": 
"Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile 
config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml new file mode 100644 index 0000000..9fa97ed --- /dev/null +++ 
b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config-jaeger + namespace: imxc +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + // Env Settings servletURL + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + demoServletURL: "{{ .Values.global.DEMO_SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings interMaxURL + interMaxURL: "http://{{ .Values.global.INTERMAX_IP }}:8080/intermax/?", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_UI_VERSION }}', + UI_build_ver: '{{ .Values.global.UI_SERVER_VERSION }}', + maxSelectionSize: 30, + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + healthIndicatorStateInfo: [ + { + state: "critical", + // max: 1.0, + // over: 0.8, + max: 100, + over: 80, + text: "Critical", + color: "#ff4040", + level: 4, + }, { + state: "warning", + // max: 0.8, + // over: 0.5, + max: 80, + over: 50, + text: "Warning", + color: "#ffa733", + level: 3, + }, { + state: "attention", + // max: 0.5, + // over: 0.0, + max: 50, + 
over: 0, + text: "Attention", + // color: "#B4B83D", + color: "#1cbe85", + level: 2, + }, { + state: "normal", + max: 0, + over: 0, + text: "Normal", + // color: "#64B87D", + color: "#24b0ed", + level: 1, + }, + ] + }; + + diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml new file mode 100644 index 0000000..a0d959f --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service-jaeger + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui-jaeger + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31084 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui-jaeger + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui-jaeger + template: + metadata: + labels: + app: imxc-ui-jaeger + spec: + containers: + - name: imxc-ui-jaeger + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config-jaeger + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config-jaeger diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/values.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/values.yaml new file mode 100644 index 0000000..bd63730 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jaeger/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 111.111.111.111 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 111.111.111.111 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 111.111.111.111 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. 
check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom 
-jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + 
"bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + 
"id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, 
+ { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": 
"view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not 
specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], 
+ "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + 
"fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": 
"S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = 
Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": 
"oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": 
"String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": 
"email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + 
"attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + 
"oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } 
+ } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + 
"autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is 
existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": 
false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": 
"Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile 
config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml new file mode 100644 index 0000000..e47ff66 --- /dev/null +++ 
b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config + namespace: imxc + +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + // Env Settings servletURL + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + // Env Settings interMaxURL + // ex) ~/intermax/?paConnect=1&paType=ResponseInspector&fromTime=1556096539206&toTime=1556096599206&serverName=jeus89 + interMaxURL: "", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_VERSION }}', + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + // refreshTime: '4', // 리로드 주기 설정 4로 설정시 새벽 4시에 리로드 하게 됨 + intervalTime: { // 5의 배수여야만 함 + short: 5, + medium: 10, + long: 60, + }, + // excludedContents: { + // anomalyScoreSettings: true, // entity black list setting page + // anomalyScoreInSidebar: true, // anomaly score in side bar + // }, + serviceTraceAgentType: 'jspd' + }; diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml new file mode 100644 
index 0000000..35c4b61 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui + template: + metadata: + labels: + app: imxc-ui + spec: + containers: + - name: imxc-ui + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config diff --git a/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/values.yaml b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/values.yaml new file mode 100644 index 0000000..bd63730 --- /dev/null +++ b/ansible/01_old/roles/test/files/06-imxc-ui/imxc-ui-jspd/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 111.111.111.111 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 111.111.111.111 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 111.111.111.111 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/ansible/01_old/roles/test/files/ip_change b/ansible/01_old/roles/test/files/ip_change new file mode 100755 index 0000000..ac13cc7 --- /dev/null +++ b/ansible/01_old/roles/test/files/ip_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + 
+before_ip=$1 +after_ip=$2 +grep_path=$3 + +if [[ $before_ip == '' || $after_ip == '' ]]; then + echo '[Usage] $0 {before_ip} {after_ip}' + exit +fi + +grep -rn ${before_ip} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_ip}/${after_ip}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/k8s_status b/ansible/01_old/roles/test/files/k8s_status new file mode 100755 index 0000000..16b3c61 --- /dev/null +++ b/ansible/01_old/roles/test/files/k8s_status @@ -0,0 +1,86 @@ +#! /usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, subprocess, io, time +from kubernetes import client, config +def debug_print(msg): + print(" # ", msg) + +def k8s_conn(KUBE_CONFIG_PATH): + config.load_kube_config( + config_file=KUBE_CONFIG_PATH + ) + k8s_api = client.CoreV1Api() + + return k8s_api + +def k8s_get_pod(k8s_api, namespace, target=''): + pretty=False + watch=False + timeout_seconds=30 + api_response = k8s_api.list_namespaced_pod(namespace, pretty=pretty, timeout_seconds=timeout_seconds, watch=watch) + pod_list=[] + for pod in api_response.items: + status = pod.status.phase + #container_status = pod.status.container_statuses[0] + #if container_status.started is False or container_status.ready is False: + # waiting_state = container_status.state.waiting + # if waiting_state.message is not None and 'Error' in waiting_state.message: + # status = waiting_state.reason + if target != '': + if target in pod.metadata.name: + return (pod.metadata.name + " " + status) + pod_list.append(pod.metadata.name+" "+status) + return pod_list + +def k8s_pod_status_check(k8s_api, waiting_time, namespace,except_pod=False): + num=0 + while True: + num+=1 + resp=k8s_get_pod(k8s_api, namespace) + all_run_flag=True + if debug_mode: + debug_print('-'*30) + debug_print('pod 상태 체크시도 : {} ({}s)'.format(num, waiting_time)) + debug_print('-'*30) + for i in resp: + if except_pod: + if except_pod in i.lower(): continue + if 'pending' in 
i.lower(): + all_run_flag=False + result='{} 결과: {}'.format(i, all_run_flag) + debug_print(result) + if all_run_flag: + if debug_mode: + debug_print('-'*30) + debug_print('[{}] pod All Running'.format(namespace)) + debug_print('-'*30) + for i in resp: debug_print(i) + break + else: time.sleep(int(waiting_time)) + +def main(): + namespace = os.sys.argv[1] + + try: + Except_k8s_pod = os.sys.argv[2] + except: + Except_k8s_pod = '' + + try: + KUBE_CONFIG_PATH = os.sys.argv[3] + os.environ["KUBECONFIG"]=KUBE_CONFIG_PATH + except: + KUBE_CONFIG_PATH = os.environ["KUBECONFIG"] + + k8s_api=k8s_conn(KUBE_CONFIG_PATH) + k8s_pod_status_check(k8s_api, 60, namespace, Except_k8s_pod) + + +if __name__ == "__main__": + try: + debug_mode=False + main() + except Exception as err: + print("[Usage] k8s_status {namespace} {Except_pod=(default=false)} {KUBECONFIG_PATH=(default=current env)}") + print(err) diff --git a/ansible/01_old/roles/test/files/postgres_check_data b/ansible/01_old/roles/test/files/postgres_check_data new file mode 100755 index 0000000..d377aeb --- /dev/null +++ b/ansible/01_old/roles/test/files/postgres_check_data @@ -0,0 +1,6 @@ +#!/bin/bash + +namespace=$1 +pg_pod=`kubectl -n ${namespace} get pod --no-headers | awk '{print $1}' | grep postgres` +kubectl_cmd="kubectl -n ${namespace} exec -it ${pg_pod} --" +${kubectl_cmd} bash -c "echo \"select count(*) from pg_database where datname='keycloak';\" | /usr/bin/psql -U postgres | egrep -iv '(count|---|row)' | tr -d ' ' | tr -d '\n'" \ No newline at end of file diff --git a/ansible/01_old/roles/test/files/rel_change b/ansible/01_old/roles/test/files/rel_change new file mode 100755 index 0000000..ae1f6b3 --- /dev/null +++ b/ansible/01_old/roles/test/files/rel_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_version=$1 +after_version=$2 +grep_path=$3 + +if [[ $before_version == '' || $after_version == '' ]]; then + echo '[Usage] $0 {before_version} 
{after_version}' + exit +fi + +grep -rn ${before_version} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_version}/${after_version}/g" + +echo "success" \ No newline at end of file diff --git a/ansible/01_old/roles/test/tasks/00-default-settings-master.yml b/ansible/01_old/roles/test/tasks/00-default-settings-master.yml new file mode 100644 index 0000000..4a17c4a --- /dev/null +++ b/ansible/01_old/roles/test/tasks/00-default-settings-master.yml @@ -0,0 +1,30 @@ +--- +- name: 1. Create a cmoa namespace + kubernetes.core.k8s: + name: "{{ cmoa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: 2. Create secret + kubernetes.core.k8s: + state: present + namespace: "{{ item }}" + src: "{{ role_path }}/files/00-default/secret_nexus.yaml" + apply: yes + with_items: + - "{{ cmoa_namespace }}" + - default + +- name: 3. kubeconfig check + shell: "echo $KUBECONFIG" + register: kubeconfig + +- name: 4. Patch default sa + shell: "{{ role_path }}/files/00-default/sa_patch.sh {{ kubeconfig.stdout }}" + +- name: 5. Master IP Setting + command: "{{ role_path }}/files/ip_change {{ before_ip }} {{ ansible_default_ipv4.address }} {{ role_path }}/files" + +- name: 6. CloudMOA Version Change + command: "{{ role_path }}/files/rel_change {{ before_version }} {{ cmoa_version }} {{ role_path }}/files" diff --git a/ansible/01_old/roles/test/tasks/00-default-settings-node.yml b/ansible/01_old/roles/test/tasks/00-default-settings-node.yml new file mode 100644 index 0000000..a568b74 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/00-default-settings-node.yml @@ -0,0 +1,27 @@ +--- +- name: 1. Node add Label (worker1) + kubernetes.core.k8s: + apply: yes + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker1 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker1 + +- name: 2. 
Node add Label (worker2) + kubernetes.core.k8s: + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker2 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker2 \ No newline at end of file diff --git a/ansible/01_old/roles/test/tasks/01-storage-install.yml b/ansible/01_old/roles/test/tasks/01-storage-install.yml new file mode 100644 index 0000000..bef58ef --- /dev/null +++ b/ansible/01_old/roles/test/tasks/01-storage-install.yml @@ -0,0 +1,45 @@ +--- +- name: 1. yaml file install (sc, pv) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/01-storage/{{ item }}" + apply: yes + with_items: + - 00-storageclass.yaml + - 01-persistentvolume.yaml + +- name: 2. helmchart install (minio) + kubernetes.core.helm: + name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/01-storage/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/01-storage/{{item}}/values.yaml" + with_items: + - minio + +- name: 3. Change a Minio Api Service (NodePort=minio_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ minio_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ minio_service_port }}" + nodePort: "{{ minio_nodePort }}" + apply: yes + +- name: 4. Check Kubernetes Pods (minio) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 5. 
minio setting (minio) + command: "{{ role_path }}/files/01-storage/cmoa_minio {{ ansible_default_ipv4.address }}:{{ minio_nodePort }} {{ minio_user }} {{ bucket_name }} {{ days }} {{ rule_id }}" \ No newline at end of file diff --git a/ansible/01_old/roles/test/tasks/02-base-install.yml b/ansible/01_old/roles/test/tasks/02-base-install.yml new file mode 100644 index 0000000..f7924a6 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/02-base-install.yml @@ -0,0 +1,51 @@ +--- +- name: 1. kafka broker config apply (base) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 00-kafka-broker-config.yaml + +- name: 2. coredns config apply (base) + kubernetes.core.k8s: + state: present + namespace: default + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 01-coredns.yaml + +- name: 3. helmchart install (base) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/02-base/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/02-base/{{item}}/values.yaml" + with_items: + - base + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/test/tasks/03-ddl-dml.yml b/ansible/01_old/roles/test/tasks/03-ddl-dml.yml new file mode 100644 index 0000000..be5af75 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/03-ddl-dml.yml @@ -0,0 +1,59 @@ +- name: 1. Check Postgres DB Data + command: "{{ role_path }}/files/postgres_check_data {{ cmoa_namespace }}" + register: pg_check_result + +- name: 2. Insert Elasticsearch template + command: "sh {{ role_path }}/files/03-ddl-dml/elasticsearch/es-ddl-put.sh {{ cmoa_namespace }}" + +- name: 2.1. Elasticsearch dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy alertmanager base-cortex-configs base-cortex-distributor base-cortex-ruler" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + +- name: 2.2. Check Kubernetes Pods (Elasticsearch dependency) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 3. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" + register: pod_list + when: pg_check_result.stdout != '1' + +- name: 4. Copy psql file in postgres (DDL) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_ddl.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" + when: item is match('postgres') and pg_check_result.stdout != '1' + with_items: "{{ pod_list.stdout_lines }}" + ignore_errors: true + +- name: 5. 
Execute a command in postgres (DDL) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 6. Copy psql file in postgres (DML) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_dml.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 7. Execute a command in postgres (DML) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true \ No newline at end of file diff --git a/ansible/01_old/roles/test/tasks/04-keycloak-install.yml b/ansible/01_old/roles/test/tasks/04-keycloak-install.yml new file mode 100644 index 0000000..de5fc9c --- /dev/null +++ b/ansible/01_old/roles/test/tasks/04-keycloak-install.yml @@ -0,0 +1,34 @@ +--- +- name: 1. helmchart install (keycloak) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/04-keycloak" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/04-keycloak/values.yaml" + with_items: + - keycloak + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/ansible/01_old/roles/test/tasks/05-imxc-install.yml b/ansible/01_old/roles/test/tasks/05-imxc-install.yml new file mode 100644 index 0000000..420d2d1 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/05-imxc-install.yml @@ -0,0 +1,16 @@ +--- +- name: 1. helmchart install (imxc) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/05-imxc" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/05-imxc/values.yaml" + with_items: + - imxc + +- name: 2. Check Kubernetes Pods (imxc / keycloak) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/test/tasks/06-imxc-ui-install.yml b/ansible/01_old/roles/test/tasks/06-imxc-ui-install.yml new file mode 100644 index 0000000..7da82a1 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/06-imxc-ui-install.yml @@ -0,0 +1,112 @@ +--- +- name: 1. helmchart install (imxc-ui-all) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + - imxc-ui-jspd + when: imxc_ui == 'all' + +- name: 1. 
helmchart install (imxc-ui-jaeger) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + when: imxc_ui == 'jaeger' + +- name: 2. Change a imxc-ui Service (imxc-ui-jaeger) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ jaeger_servicename }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ jaeger_service_port }}" + nodePort: "{{ jaeger_nodePort }}" + apply: yes + when: imxc_ui == 'jaeger' + +- name: 2. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" # Output is a column + register: pod_list + when: imxc_ui != 'all' + +- name: 3. Copy psql file in psql (imxc-jaeger) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jaeger_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 4. Execute a command in psql (imxc-jaeger) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 1. 
helmchart install (imxc-ui-jspd) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jspd + when: imxc_ui == 'jspd' + ignore_errors: true + +- name: 3. Copy psql file in postgres (imxc-ui-jspd) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jspd_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 4. Execute a command in postgres (imxc-ui-jspd) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 2. Check Kubernetes Pods (imxc ui) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/ansible/01_old/roles/test/tasks/07-keycloak-setting.yml b/ansible/01_old/roles/test/tasks/07-keycloak-setting.yml new file mode 100644 index 0000000..8e90b79 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/07-keycloak-setting.yml @@ -0,0 +1,90 @@ +--- +- name: 0. 
Generate keycloak auth token + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/realms/master/protocol/openid-connect/token" + method: POST + body: "client_id={{ keycloak_auth_client }}&username={{ keycloak_admin_user }}&password={{ keycloak_admin_password }}&grant_type=password" + validate_certs: no + register: keycloak_auth_response + until: keycloak_auth_response.status == 200 + retries: 5 + delay: 2 + +- name: 1. Determine if realm exists + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/admin/realms/{{ keycloak_realm }}" + method: GET + status_code: + - 200 + - 404 + headers: + Accept: "application/json" + Authorization: "Bearer {{ keycloak_auth_response.json.access_token }}" + register: keycloak_realm_exists + + +- name: 2. update a keycloak realm + community.general.keycloak_realm: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + login_theme: "{{ keycloak_login_theme }}" + loop: "{{ keycloak_clients | flatten }}" + +- name: 3. Validate Keycloak clients + ansible.builtin.assert: + that: + - item.name is defined and item.name | length > 0 + - (item.client_id is defined and item.client_id | length > 0) or (item.id is defined and item.id | length > 0) + fail_msg: "For each keycloak client, attributes `name` and either `id` or `client_id` is required" + quiet: True + loop: "{{ keycloak_clients | flatten }}" + loop_control: + label: "{{ item.name | default('unnamed client') }}" + + +- name: 4. 
update a Keycloak client + community.general.keycloak_client: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + default_roles: "{{ item.roles | default(omit) }}" + client_id: "{{ item.client_id | default(omit) }}" + id: "{{ item.id | default(omit) }}" + name: "{{ item.name | default(omit) }}" + description: "{{ item.description | default(omit) }}" + root_url: "{{ item.root_url | default('') }}" + admin_url: "{{ item.admin_url | default('') }}" + base_url: "{{ item.base_url | default('') }}" + enabled: "{{ item.enabled | default(True) }}" + redirect_uris: "{{ item.redirect_uris | default(omit) }}" + web_origins: "{{ item.web_origins | default('+') }}" + bearer_only: "{{ item.bearer_only | default(omit) }}" + standard_flow_enabled: "{{ item.standard_flow_enabled | default(omit) }}" + implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(omit) }}" + direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(omit) }}" + service_accounts_enabled: "{{ item.service_accounts_enabled | default(omit) }}" + public_client: "{{ item.public_client | default(False) }}" + protocol: "{{ item.protocol | default(omit) }}" + state: present + register: create_client_result + loop: "{{ keycloak_clients | flatten }}" + when: (item.name is defined and item.client_id is defined) or (item.name is defined and item.id is defined) + +- name: 5. Dependency deploy scale down + command: "kubectl -n {{ cmoa_namespace }} scale --replicas=0 deploy imxc-api noti-server auth-server zuul-deployment" + +- name: 6. 
Dependency deploy scale up + command: "kubectl -n {{ cmoa_namespace }} scale --replicas=1 deploy imxc-api noti-server auth-server zuul-deployment" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + + diff --git a/ansible/01_old/roles/test/tasks/08-finish.yml b/ansible/01_old/roles/test/tasks/08-finish.yml new file mode 100644 index 0000000..4fd19f4 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/08-finish.yml @@ -0,0 +1,17 @@ +--- +- name: 0. Check Kubernetes Pods (ALL) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 1. IP Setting reset + command: "{{ role_path }}/files/ip_change {{ansible_default_ipv4.address}} {{before_ip}} {{ role_path }}/files" + +- name: 2. CloudMOA Version reset + command: "{{ role_path }}/files/rel_change {{ cmoa_version }} {{ before_version }} {{ role_path }}/files" + +- debug: + msg: + - ======================================================================================= + - "## CloudMOA WEB " + - CloudMOA Jaeger = http://{{ ansible_default_ipv4.address }}:31080 + - CloudMOA JSPD = http://{{ ansible_default_ipv4.address }}:31084 + - ======================================================================================= diff --git a/ansible/01_old/roles/test/tasks/helm-install.yml b/ansible/01_old/roles/test/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/ansible/01_old/roles/test/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + 
remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/ansible/01_old/roles/test/tasks/main.yml b/ansible/01_old/roles/test/tasks/main.yml new file mode 100644 index 0000000..5e267bd --- /dev/null +++ b/ansible/01_old/roles/test/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: test + debug: + msg: "{{ cmoa_version }}" diff --git a/ansible/01_old/roles/test/templates/realm.json.j2 b/ansible/01_old/roles/test/templates/realm.json.j2 new file mode 100644 index 0000000..1323ce2 --- /dev/null +++ b/ansible/01_old/roles/test/templates/realm.json.j2 @@ -0,0 +1,7 @@ +{ + "id": "{{ keycloak_realm }}", + "realm": "{{ keycloak_realm }}", + "enabled": true, + "eventsEnabled": true, + "eventsExpiration": 7200 +} diff --git a/ansible/01_old/roles/test/vars/main.yml b/ansible/01_old/roles/test/vars/main.yml new file mode 100644 index 0000000..14c8e95 --- /dev/null +++ b/ansible/01_old/roles/test/vars/main.yml @@ -0,0 +1,7 @@ +--- +# name of the realm to create, this is a required variable +keycloak_realm: Exem + +# other settings +keycloak_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_http_port }}" +keycloak_management_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_management_http_port }}" diff --git a/ansible/01_old/roles/zabbix-agent/defaults/main.yml b/ansible/01_old/roles/zabbix-agent/defaults/main.yml new file mode 100644 index 
0000000..7e57368 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/defaults/main.yml @@ -0,0 +1,292 @@ +--- +# defaults file for zabbix_agent + +zabbix_agent2: false +# zabbix_agent_version: 6.4 +zabbix_agent_version_minor: "*" +zabbix_version_patch: 0 +zabbix_agent_package_remove: false +zabbix_agent_package: zabbix-agent +zabbix_sender_package: zabbix-sender +zabbix_get_package: zabbix-get +zabbix_agent_package_state: present +zabbix_agent_server: +zabbix_agent_serveractive: +zabbix_agent2_server: "{{ zabbix_agent_server }}" +zabbix_agent2_serveractive: "{{ zabbix_agent_serveractive }}" +zabbix_selinux: false +zabbix_agent_apt_priority: +zabbix_agent_conf_mode: "0644" +zabbix_agent_dont_detect_ip: false +zabbix_agent_allow_key: [] +zabbix_agent_deny_key: [] +zabbix_agent2_allow_key: "{{ zabbix_agent_allow_key }}" +zabbix_agent2_deny_key: "{{ zabbix_agent_deny_key }}" + +# Selinux related vars +selinux_allow_zabbix_run_sudo: false + +zabbix_agent_install_agent_only: false +zabbix_agent_packages: + - "{{ zabbix_agent_package }}" + - "{{ zabbix_sender_package }}" + - "{{ zabbix_get_package }}" + +# Zabbix role related vars +zabbix_apt_force_apt_get: true +zabbix_apt_install_recommends: false + +# Override Ansible specific facts +zabbix_agent_distribution_major_version: "{{ ansible_distribution_major_version }}" +zabbix_agent_distribution_release: "{{ ansible_distribution_release }}" +zabbix_repo_yum_gpgcheck: 0 +zabbix_repo_yum_schema: https +zabbix_agent_disable_repo: + - epel +zabbix_repo_yum: + - name: zabbix + description: Zabbix Official Repository - $basearch + baseurl: "{{ zabbix_repo_yum_schema }}://repo.zabbix.com/zabbix/{{ zabbix_agent_version }}/rhel/{{ zabbix_agent_distribution_major_version }}/$basearch/" + mode: "0644" + gpgcheck: "{{ zabbix_repo_yum_gpgcheck }}" + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX + state: present + - name: zabbix-non-supported + description: Zabbix Official Repository non-supported - $basearch + baseurl: "{{ 
zabbix_repo_yum_schema }}://repo.zabbix.com/non-supported/rhel/{{ zabbix_agent_distribution_major_version }}/$basearch/" + mode: "0644" + gpgcheck: "{{ zabbix_repo_yum_gpgcheck }}" + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX + state: present + - name: zabbix-agent2-plugins + description: Zabbix Official Repository (Agent2 Plugins) - $basearch + baseurl: "{{ zabbix_repo_yum_schema }}://repo.zabbix.com/zabbix-agent2-plugins/1/rhel/{{ zabbix_agent_distribution_major_version }}/$basearch/" + mode: "0644" + gpgcheck: "{{ zabbix_repo_yum_gpgcheck }}" + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX + state: present + +zabbix_repo_deb_component: main + +# Zabbix API stuff +zabbix_api_server_host: localhost +# zabbix_api_server_port: 80 +zabbix_api_login_user: Admin +zabbix_api_use_ssl: false +zabbix_api_login_pass: !unsafe zabbix +zabbix_api_validate_certs: false +ansible_httpapi_pass: "{{ zabbix_api_login_pass }}" +ansible_httpapi_port: "{{ zabbix_api_server_port }}" +ansible_httpapi_validate_certs: "{{ zabbix_api_validate_certs }}" +zabbix_api_timeout: 30 +zabbix_api_create_hostgroup: false +zabbix_api_create_hosts: false +zabbix_agent_hostgroups_state: present # or absent +zabbix_agent_host_state: present # or absent +zabbix_agent_host_update: true +zabbix_host_status: enabled # or disabled +zabbix_agent_proxy: null +zabbix_agent_inventory_mode: disabled +zabbix_useuip: 1 +zabbix_host_groups: + - Linux servers +zabbix_agent_link_templates: + - Template Linux by Zabbix agent + +zabbix_agent_interfaces: + - type: 1 + main: 1 + useip: "{{ zabbix_useuip }}" + ip: "{{ zabbix_agent_ip }}" + dns: "{{ ansible_fqdn }}" + port: "{{ (zabbix_agent2 == True) | ternary(zabbix_agent2_listenport, zabbix_agent_listenport) }}" + +# Zabbix configuration variables +zabbix_agent_pidfile: /var/run/zabbix/zabbix_agentd.pid +zabbix_agent_logtype: file +zabbix_agent_logfile: /var/log/zabbix/zabbix_agentd.log +zabbix_agent_logfilesize: 100 +zabbix_agent_debuglevel: 3 
+zabbix_agent_sourceip: +zabbix_agent_enableremotecommands: 0 +zabbix_agent_allowkeys: +zabbix_agent_denykeys: +zabbix_agent_logremotecommands: 0 +zabbix_agent_listenport: 10050 +zabbix_agent_jmx_listenport: +zabbix_agent_listeninterface: +zabbix_agent_listenip: +zabbix_agent_startagents: 3 +zabbix_agent_hostname: "{{ inventory_hostname }}" +zabbix_agent_hostnameitem: +zabbix_agent_hostmetadata: +zabbix_agent_hostmetadataitem: +zabbix_agent_refreshactivechecks: 120 +zabbix_agent_buffersend: 5 +zabbix_agent_buffersize: 100 +zabbix_agent_maxlinespersecond: 100 +zabbix_agent_allowroot: 0 +zabbix_agent_zabbix_alias: +zabbix_agent_timeout: 3 +zabbix_agent_include: /etc/zabbix/zabbix_agentd.d +zabbix_agent_include_pattern: +zabbix_agent_include_mode: "0750" +zabbix_agent_unsafeuserparameters: 0 +zabbix_agent_userparameters: [] +zabbix_agent_userparameters_templates_src: "userparameters" +zabbix_agent_userparameters_scripts_src: "scripts" +zabbix_agent_custom_scripts: false +zabbix_agent_loadmodulepath: ${libdir}/modules +zabbix_agent_loadmodule: +zabbix_agent_become_on_localhost: true +zabbix_agent_description: +zabbix_agent_inventory_zabbix: {} +zabbix_agent_heartbeatfrequency: 60 +zabbix_agent_macros: [] +zabbix_agent_tags: [] +zabbix_agent_chassis: false + +# TLS settings +zabbix_agent_tlsconnect: +zabbix_agent_tlsaccept: +zabbix_agent_tlscafile: +zabbix_agent_tlscrlfile: +zabbix_agent_tlsservercertissuer: +zabbix_agent_tlsservercertsubject: +zabbix_agent_tls_subject: "{{ zabbix_agent_tlsservercertsubject }}" # FIXME this is not correct and should be removed with 2.0.0, here only to prevent regression +zabbix_agent_tlscertfile: +zabbix_agent_tlskeyfile: +zabbix_agent_tlspskidentity: +zabbix_agent_tlspsk_auto: false + +zabbix_agent_tls_config: + unencrypted: "1" + psk: "2" + cert: "4" + +# IPMI settings +zabbix_agent_ipmi_authtype: 2 +zabbix_agent_ipmi_password: +zabbix_agent_ipmi_privilege: 2 +zabbix_agent_ipmi_username: + +# Zabbix Agent2 +zabbix_agent2_pidfile: 
/var/run/zabbix/zabbix_agent2.pid +zabbix_agent2_logfile: /var/log/zabbix/zabbix_agent2.log +zabbix_agent2_logtype: file +zabbix_agent2_statusport: 9999 +zabbix_agent2_include: /etc/zabbix/zabbix_agent2.d +zabbix_agent2_include_pattern: +zabbix_agent2_logfilesize: 100 +zabbix_agent2_debuglevel: 3 +zabbix_agent2_sourceip: +zabbix_agent2_listenport: 10050 +zabbix_agent2_listenip: +zabbix_agent2_hostname: "{{ inventory_hostname }}" +zabbix_agent2_hostnameitem: +zabbix_agent2_hostmetadata: +zabbix_agent2_hostmetadataitem: +zabbix_agent2_hostinterface: +zabbix_agent2_hostinterfaceitem: +zabbix_agent2_enablepersistentbuffer: 0 +zabbix_agent2_persistentbufferperiod: 1h +zabbix_agent2_persistentbufferfile: +zabbix_agent2_refreshactivechecks: 120 +zabbix_agent2_buffersend: 5 +zabbix_agent2_buffersize: 100 +zabbix_agent2_zabbix_alias: +zabbix_agent2_timeout: 3 +zabbix_agent2_include_mode: "0750" +zabbix_agent2_unsafeuserparameters: 0 +zabbix_agent2_controlsocket: /tmp/agent.sock +zabbix_agent2_plugins: [] + +# Zabbix Agent2 TLS settings +zabbix_agent2_tlsconnect: +zabbix_agent2_tlsaccept: +zabbix_agent2_tlscafile: +zabbix_agent2_tlscrlfile: +zabbix_agent2_tlsservercertissuer: +zabbix_agent2_tlsservercertsubject: +zabbix_agent2_tls_subject: "{{ zabbix_agent2_tlsservercertsubject }}" # FIXME this is not correct and should be removed with 2.0.0, here only to prevent regression +zabbix_agent2_tlscertfile: +zabbix_agent2_tlskeyfile: +zabbix_agent2_tlspskidentity: +zabbix_agent2_tlspsk_auto: false + +# Windows/macOS Related +zabbix_version_long: 5.2.4 + +# Windows Related +zabbix_win_package: zabbix_agent-{{ zabbix_version_long }}-windows-amd64-openssl.zip +zabbix2_win_package: zabbix_agent2-{{ zabbix_version_long }}-windows-amd64-openssl-static.zip +zabbix_win_download_url: https://cdn.zabbix.com/zabbix/binaries/stable +zabbix_win_download_link: "{{ zabbix_win_download_url }}/{{ zabbix_version_long | regex_search('^\\d+\\.\\d+') }}/{{ zabbix_version_long }}/{{ zabbix_win_package 
}}" +zabbix2_win_download_link: "{{ zabbix_win_download_url }}/{{ zabbix_version_long | regex_search('^\\d+\\.\\d+') }}/{{ zabbix_version_long }}/{{ zabbix2_win_package }}" +zabbix_win_install_dir: 'C:\Zabbix' +zabbix_win_install_dir_conf: '{{ zabbix_win_install_dir }}\\conf' +zabbix_win_install_dir_bin: '{{ zabbix_win_install_dir }}\\bin' +zabbix_agent_win_logfile: "{{ zabbix_win_install_dir }}\\zabbix_agentd.log" +zabbix_agent_win_include: "{{ zabbix_win_install_dir }}\\zabbix_agent.d\\" +zabbix_agent2_win_logfile: "{{ zabbix_win_install_dir }}\\zabbix_agent2.log" +zabbix_agent_win_svc_recovery: true +zabbix_win_firewall_management: true + +# macOS Related +zabbix_mac_package: zabbix_agent-{{ zabbix_version_long }}-macos-amd64-openssl.pkg +zabbix_mac_download_url: https://cdn.zabbix.com/zabbix/binaries/stable +zabbix_mac_download_link: "{{ zabbix_mac_download_url }}/{{ zabbix_agent_version }}/{{ zabbix_version_long }}/{{ zabbix_mac_package }}" + +# Zabbix Agent Docker facts +zabbix_agent_docker: false +zabbix_agent_docker_state: started +zabbix_agent_docker_name: zabbix-agent +zabbix_agent_docker_image: "zabbix/zabbix-agent" +zabbix_agent_docker_image_tag: "ubuntu-{{ zabbix_agent_version }}.{{ zabbix_version_patch }}" +zabbix_agent_docker_user_gid: 101 +zabbix_agent_docker_user_uid: 101 +zabbix_agent_docker_network_mode: host +zabbix_agent_docker_restart_policy: unless-stopped +zabbix_agent_docker_privileged: false +zabbix_agent_docker_ports: + - 10050:10050 +zabbix_agent_docker_security_opts: + - apparmor:unconfined +zabbix_agent_docker_volumes: + - /etc/zabbix/zabbix_agentd.d:{{ zabbix_agent_include }} + - /:/hostfs:ro + - /etc:/hostfs/etc:ro + - /proc:/hostfs/proc:ro + - /sys:/hostfs/sys:ro + - /var/run:/var/run +zabbix_agent_docker_env: + ZBX_HOSTNAME: "{{ zabbix_agent_hostname }}" + ZBX_SERVER_HOST: "{{ zabbix_agent_server }}" + ZBX_PASSIVE_ALLOW: "{{ zabbix_agent_serverpassive_allow | default(omit) }}" + ZBX_PASSIVESERVERS: "{{ zabbix_agent_serverpassive | 
default(omit) }}" + ZBX_ACTIVE_ALLOW: "{{ zabbix_agent_serveractive_allow | default(omit) }}" + ZBX_LOADMODULE: "{{ zabbix_agent_loadmodule | default(omit) }}" + ZBX_DEBUGLEVEL: "{{ zabbix_agent_debuglevel }}" + ZBX_TIMEOUT: "{{ zabbix_agent_timeout }}" + ZBX_SOURCEIP: "{{ zabbix_agent_sourceip | default(omit) }}" + ZBX_ENABLEREMOTECOMMANDS: "{{ zabbix_agent_enableremotecommands | default(omit) }}" + ZBX_LOGREMOTECOMMANDS: "{{ zabbix_agent_logremotecommands | default(omit) }}" + ZBX_STARTAGENTS: "{{ zabbix_agent_startagents | default(omit) }}" + ZBX_HOSTNAMEITEM: "{{ zabbix_agent_hostnameitem | default(omit) }}" + ZBX_METADATA: "{{ zabbix_agent_hostmetadata | default(omit) }}" + ZBX_METADATAITEM: "{{ zabbix_agent_hostmetadataitem | default(omit) }}" + ZBX_REFRESHACTIVECHECKS: "{{ zabbix_agent_refreshactivechecks | default(omit) }}" + ZBX_BUFFERSEND: "{{ zabbix_agent_buffersend | default(omit) }}" + ZBX_BUFFERSIZE: "{{ zabbix_agent_buffersize | default(omit) }}" + ZBX_MAXLINESPERSECOND: "{{ zabbix_agent_maxlinespersecond | default(omit) }}" + ZBX_LISTENIP: "{{ zabbix_agent_listenip }}" + ZBX_UNSAFEUSERPARAMETERS: "{{ zabbix_agent_unsafeuserparameters | default(omit) }}" + ZBX_TLSCONNECT: "{{ zabbix_agent_tlsconnect | default(omit) }}" + ZBX_TLSACCEPT: "{{ zabbix_agent_tlsaccept | default(omit) }}" + ZBX_TLSCAFILE: "{{ zabbix_agent_tlscafile | default(omit) }}" + ZBX_TLSCRLFILE: "{{ zabbix_agent_tlscrlfile | default(omit) }}" + ZBX_TLSSERVERCERTISSUER: "{{ zabbix_agent_tlsservercertissuer | default(omit) }}" + ZBX_TLSSERVERCERTSUBJECT: "{{ zabbix_agent_tlsservercertsubject | default(omit) }}" + ZBX_TLSCERTFILE: "{{ zabbix_agent_tlscertfile | default(omit) }}" + ZBX_TLSKEYFILE: "{{ zabbix_agent_tlskeyfile | default(omit) }}" + ZBX_TLSPSKIDENTITY: "{{ zabbix_agent_tlspskidentity | default(omit) }}" diff --git a/ansible/01_old/roles/zabbix-agent/files/sample.conf b/ansible/01_old/roles/zabbix-agent/files/sample.conf new file mode 100644 index 0000000..70df285 --- 
/dev/null +++ b/ansible/01_old/roles/zabbix-agent/files/sample.conf @@ -0,0 +1,3 @@ +# This is an sample userparameters file. + +UserParameter=mysql.ping_to,mysqladmin -uroot ping | grep -c alive diff --git a/ansible/01_old/roles/zabbix-agent/files/win_sample/doSomething.ps1 b/ansible/01_old/roles/zabbix-agent/files/win_sample/doSomething.ps1 new file mode 100644 index 0000000..e69de29 diff --git a/ansible/01_old/roles/zabbix-agent/handlers/main.yml b/ansible/01_old/roles/zabbix-agent/handlers/main.yml new file mode 100644 index 0000000..46fa0a8 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/handlers/main.yml @@ -0,0 +1,40 @@ +--- +# handlers file for zabbix-agent + +- name: restart zabbix-agent + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + state: restarted + enabled: true + become: true + when: + - not zabbix_agent_docker + - ansible_os_family != "Windows" and ansible_os_family != "Darwin" + +- name: firewalld-reload + ansible.builtin.command: "firewall-cmd --reload" + become: true + when: + - ansible_facts.services["firewalld"] is defined + - ansible_facts.services["firewalld"].state == "running" + +- name: restart win zabbix agent + win_service: + name: "{{ zabbix_win_svc_name }}" + state: restarted + when: + - ansible_os_family == "Windows" + +- name: restart mac zabbix agent + ansible.builtin.command: "launchctl kickstart -k system/{{ zabbix_agent_service }}" + become: true + when: + - not zabbix_agent_docker + - ansible_os_family == "Darwin" + +- name: "clean repo files from proxy creds" + ansible.builtin.shell: ls /etc/yum.repos.d/zabbix* && sed -i 's/^proxy =.*//' /etc/yum.repos.d/zabbix* || true + become: true + when: + - ansible_os_family == 'RedHat' + - zabbix_http_proxy is defined or zabbix_https_proxy is defined diff --git a/ansible/01_old/roles/zabbix-agent/meta/main.yml b/ansible/01_old/roles/zabbix-agent/meta/main.yml new file mode 100644 index 0000000..22803e0 --- /dev/null +++ 
b/ansible/01_old/roles/zabbix-agent/meta/main.yml @@ -0,0 +1,42 @@ +--- +galaxy_info: + author: Werner Dijkerman + description: Installing and maintaining zabbix-agent for RedHat/Debian/Ubuntu/Windows/Suse. + company: myCompany.Dotcom + license: MIT + min_ansible_version: 2.7 + platforms: + - name: EL + versions: + - 5 + - 6 + - 7 + - name: Ubuntu + versions: + - lucid + - precise + - trusty + - xenial + - bionic + - name: Debian + versions: + - squeeze + - wheezy + - jessie + - stretch + - buster + - name: opensuse + versions: + - 12.1 + - 12.2 + - 12.3 + - 13.1 + - 13.2 + - name: Windows + versions: + - all + + galaxy_tags: + - zabbix + - monitoring +dependencies: [] diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 b/ansible/01_old/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 new file mode 100644 index 0000000..1df5be7 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 @@ -0,0 +1,14 @@ +# Molecule managed + +{% if item.registry is defined %} +FROM {{ item.registry.url }}/{{ item.image }} +{% else %} +FROM {{ item.image }} +{% endif %} + +RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ + elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \ + elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ + elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ + elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ + elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi diff --git 
a/ansible/01_old/roles/zabbix-agent/molecule/with-server/INSTALL.rst b/ansible/01_old/roles/zabbix-agent/molecule/with-server/INSTALL.rst new file mode 100644 index 0000000..3c2ae97 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/INSTALL.rst @@ -0,0 +1,26 @@ +******************************** +Docker driver installation guide +******************************** + +Requirements +============ + +* General molecule dependencies (see https://molecule.readthedocs.io/en/latest/installation.html) +* Docker Engine +* docker-py +* docker + +Install +======= + +Ansible < 2.6 + +.. code-block:: bash + + $ sudo pip install docker-py + +Ansible >= 2.6 + +.. code-block:: bash + + $ sudo pip install docker diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/molecule.yml b/ansible/01_old/roles/zabbix-agent/molecule/with-server/molecule.yml new file mode 100644 index 0000000..7795860 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/molecule.yml @@ -0,0 +1,73 @@ +--- +dependency: + name: galaxy +driver: + name: docker +platforms: + - name: zabbix-server-centos + image: milcom/centos7-systemd:latest + groups: + - zabbix_server + - mysql + privileged: true + networks: + - name: zabbix + published_ports: + - "80:80" + - name: zabbix-agent-centos + image: milcom/centos7-systemd:latest + groups: + - zabbix_agent + privileged: true + networks: + - name: zabbix + - name: zabbix-agent-debian + image: minimum2scp/systemd-stretch:latest + command: /sbin/init + groups: + - zabbix_agent + privileged: true + networks: + - name: zabbix + - name: zabbix-agent-ubuntu + image: solita/ubuntu-systemd:bionic + groups: + - zabbix_agent + privileged: true + networks: + - name: zabbix + +provisioner: + name: ansible + playbooks: + docker: + create: ../default/create.yml + destroy: ../default/destroy.yml + inventory: + group_vars: + all: + zabbix_api_create_hosts: true + zabbix_api_create_hostgroup: true + zabbix_api_server_url: 
http://zabbix-server-centos + zabbix_apache_servername: zabbix-server-centos + mysql: + zabbix_server_database: mysql + zabbix_server_database_long: mysql + zabbix_server_dbport: 3306 + database_type: mysql + database_type_long: mysql + host_vars: + zabbix-agent-fedora: + ansible_python_interpreter: /usr/bin/python3 + zabbix-agent-ubuntu: + zabbix_agent_tlsaccept: psk + zabbix_agent_tlsconnect: psk + zabbix_agent_tlspskidentity: "myhost PSK" + zabbix_agent_tlspsk_secret: b7e3d380b9d400676d47198ecf3592ccd4795a59668aa2ade29f0003abbbd40d + zabbix_agent_tlspskfile: /etc/zabbix/zabbix_agent_pskfile.psk + +scenario: + name: with-server + +verifier: + name: testinfra diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/playbook.yml b/ansible/01_old/roles/zabbix-agent/molecule/with-server/playbook.yml new file mode 100644 index 0000000..21c3ea0 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/playbook.yml @@ -0,0 +1,24 @@ +--- +- name: Converge + hosts: all:!zabbix_server + pre_tasks: + - name: "Get IP Server" + ansible.builtin.shell: grep $(hostname) /etc/hosts | awk '{ print $1 }' | tail -n 1 + register: ip_address + delegate_to: zabbix-server-centos + changed_when: false + tags: + - skip_ansible_lint + + - name: "Get IP hosts" + ansible.builtin.shell: grep $(hostname) /etc/hosts | awk '{ print $1 }' | tail -n 1 + register: ip_address_host + changed_when: false + tags: + - skip_ansible_lint + + roles: + - role: zabbix_agent + zabbix_agent_ip: "{{ ip_address_host.stdout }}" + zabbix_agent_server: "{{ ip_address.stdout }}" + zabbix_agent_serveractive: "{{ ip_address.stdout }}" diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/prepare.yml b/ansible/01_old/roles/zabbix-agent/molecule/with-server/prepare.yml new file mode 100644 index 0000000..a08d0fa --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/prepare.yml @@ -0,0 +1,114 @@ +--- +- name: Prepare + hosts: zabbix_server + pre_tasks: + - 
name: "Installing EPEL" + ansible.builtin.yum: + name: + - epel-release + state: present + when: ansible_distribution == 'CentOS' + + - name: "Installing packages" + ansible.builtin.yum: + name: + - net-tools + - which + - libselinux-python + - python-pip + state: present + register: installation_dependencies + when: ansible_distribution == 'CentOS' + + - name: "Installing which on NON-CentOS" + ansible.builtin.apt: + name: + - net-tools + - python-pip + - curl + state: present + when: ansible_distribution != 'CentOS' + + - name: "Configure SUDO." + ansible.builtin.lineinfile: + dest: /etc/sudoers + line: "Defaults !requiretty" + state: present + + - name: "Make sure the docs are installed." + ansible.builtin.lineinfile: + dest: /etc/yum.conf + line: "tsflags=nodocs" + state: absent + + - name: "Installing some python dependencies" + ansible.builtin.pip: + name: py-zabbix + state: present + + roles: + - role: geerlingguy.mysql + - role: zabbix_server + - role: zabbix_web + +- name: Prepare + hosts: all:!zabbix_server:!docker + tasks: + - name: "Installing packages on CentOS family" + ansible.builtin.yum: + name: + - net-tools + - which + state: present + when: + - ansible_os_family == 'RedHat' + + - name: "Installing packages on Debian family" + ansible.builtin.apt: + name: + - net-tools + state: present + when: + - ansible_os_family == 'Debian' + +- name: Converge + hosts: docker + tasks: + - name: "Download Docker CE repo file" + ansible.builtin.get_url: + url: https://download.docker.com/linux/centos/docker-ce.repo + dest: /etc/yum.repos.d/docker-ce.repo + mode: 0644 + register: zabbix_agent_prepare_docker_repo + until: zabbix_agent_prepare_docker_repo is succeeded + + - name: "Installing Epel" + ansible.builtin.package: + pkg: + - epel-release + state: present + register: zabbix_agent_prepare_docker_install + until: zabbix_agent_prepare_docker_install is succeeded + + - name: "Installing Docker" + ansible.builtin.package: + pkg: + - docker-ce + - python-pip + - 
python-setuptools + state: present + register: zabbix_agent_prepare_docker_install + until: zabbix_agent_prepare_docker_install is succeeded + + - name: "Installing Docker Python" + ansible.builtin.pip: + name: + - docker + state: present + register: zabbix_agent_prepare_docker_install + until: zabbix_agent_prepare_docker_install is succeeded + + - name: "Starting Docker service" + ansible.builtin.service: + name: docker + state: started diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/requirements.yml b/ansible/01_old/roles/zabbix-agent/molecule/with-server/requirements.yml new file mode 100644 index 0000000..793f925 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/requirements.yml @@ -0,0 +1,5 @@ +--- +- src: geerlingguy.apache +- src: geerlingguy.mysql +- src: dj-wasabi.zabbix-server +- src: dj-wasabi.zabbix-web diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_agent.py b/ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_agent.py new file mode 100644 index 0000000..b6fbb22 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_agent.py @@ -0,0 +1,44 @@ +import os +from zabbix_api import ZabbixAPI + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('zabbix_agent') + + +def authenticate(): + zapi = ZabbixAPI(server='http://zabbix-server-centos/api_jsonrpc.php') + zapi.login("Admin", "zabbix") + return zapi + + +def test_psk_host(host): + zapi = authenticate() + hostname = host.check_output('hostname -s') + host_name = "zabbix-agent-ubuntu" + + server_data = zapi.host.get({'output': 'extend', 'selectInventory': 'extend', 'filter': {'host': [hostname]}}) + + if hostname == host_name: + assert server_data[0]['tls_psk'] == "b7e3d380b9d400676d47198ecf3592ccd4795a59668aa2ade29f0003abbbd40d" + assert server_data[0]['tls_psk_identity'] == "myhost 
PSK" + assert server_data[0]['tls_accept'] == "2" + else: + assert server_data[0]['tls_psk'] == "" + assert server_data[0]['tls_psk_identity'] == "" + assert server_data[0]['tls_accept'] == "1" + + +def test_zabbix_agent_psk(host): + hostname = host.check_output('hostname -s') + host_name = "zabbix-agent-ubuntu" + + psk_file = host.file("/etc/zabbix/zabbix_agent_pskfile.psk") + if hostname == host_name: + assert psk_file.user == "zabbix" + assert psk_file.group == "zabbix" + assert psk_file.mode == 0o400 + assert psk_file.contains("b7e3d380b9d400676d47198ecf3592ccd4795a59668aa2ade29f0003abbbd40d") + else: + assert not psk_file.exists diff --git a/ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_default.py b/ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_default.py new file mode 100644 index 0000000..f81cca3 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/molecule/with-server/tests/test_default.py @@ -0,0 +1,41 @@ +import os +from zabbix_api import ZabbixAPI + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('zabbix_server') + + +def authenticate(): + zapi = ZabbixAPI(server='http://zabbix-server-centos/api_jsonrpc.php') + zapi.login("Admin", "zabbix") + return zapi + + +def get_hosts(): + return [ + "zabbix-agent-debian", + "zabbix-agent-ubuntu", + "zabbix-agent-centos", + "zabbix-agent-docker-centos" + ] + + +def test_hosts(): + zapi = authenticate() + hosts = get_hosts() + servers = zapi.host.get({'output': ["hostid", "name"]}) + + for server in servers: + if server['name'] != 'Zabbix server': + assert server['name'] in hosts + + +def test_hosts_status(): + zapi = authenticate() + servers = zapi.host.get({'output': ["status", "name"]}) + + for server in servers: + if server['name'] != 'Zabbix server': + assert int(server['status']) == 0 diff --git a/ansible/01_old/roles/zabbix-agent/tasks/Debian.yml 
b/ansible/01_old/roles/zabbix-agent/tasks/Debian.yml new file mode 100644 index 0000000..3a56b8e --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/Debian.yml @@ -0,0 +1,151 @@ +--- +# Tasks specific for Debian/Ubuntu Systems + +- name: "Debian | Set some variables" + ansible.builtin.set_fact: + zabbix_short_version: "{{ zabbix_agent_version | regex_replace('\\.', '') }}" + zabbix_underscore_version: "{{ zabbix_agent_version | regex_replace('\\.', '_') }}" + tags: + - always + +- name: "Debian | Repo URL" + ansible.builtin.set_fact: + zabbix_repo_deb_url: "{{ _zabbix_repo_deb_url }}{{ '-arm64' if ansible_machine == 'aarch64' else ''}}" + when: + - zabbix_repo_deb_url is undefined + tags: + - always + +- name: "Debian | Installing gnupg" + ansible.builtin.apt: + pkg: gnupg + update_cache: true + cache_valid_time: 3600 + force: true + state: present + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: gnupg_installed + until: gnupg_installed is succeeded + become: true + tags: + - install + +# In releases older than Debian 12 and Ubuntu 22.04, /etc/apt/keyrings does not exist by default. +# It SHOULD be created with permissions 0755 if it is needed and does not already exist. 
+# See: https://wiki.debian.org/DebianRepository/UseThirdParty +- name: "Debian | Create /etc/apt/keyrings/ on older versions" + ansible.builtin.file: + path: /etc/apt/keyrings/ + state: directory + mode: "0755" + become: true + when: + - (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "22") or + (ansible_distribution == "Debian" and ansible_distribution_major_version < "12") + +- name: "Debian | Download gpg key" + ansible.builtin.get_url: + url: http://repo.zabbix.com/zabbix-official-repo.key + dest: "{{ zabbix_gpg_key }}" + mode: "0644" + force: true + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + become: true + tags: + - install + +- name: "Debian | Installing repository {{ ansible_distribution }}" + ansible.builtin.copy: + dest: /etc/apt/sources.list.d/zabbix.sources + owner: root + group: root + mode: 0644 + content: | + Types: deb deb-src + Enabled: yes + URIs: {{ zabbix_repo_deb_url }} + Suites: {{ ansible_distribution_release }} + Components: {{ zabbix_repo_deb_component }} + Architectures: {{ 'amd64' if ansible_machine != 'aarch64' else 'arm64'}} + Signed-By: {{ zabbix_gpg_key }} + become: true + tags: + - install + +- name: "Debian | Create /etc/apt/preferences.d/" + ansible.builtin.file: + path: /etc/apt/preferences.d/ + state: directory + mode: "0755" + when: + - zabbix_agent_apt_priority | int + become: true + tags: + - install + +- name: "Debian | Configuring the weight for APT" + ansible.builtin.copy: + dest: "/etc/apt/preferences.d/zabbix-agent-{{ zabbix_underscore_version }}" + content: | + Package: {{ zabbix_agent_package }} + Pin: origin repo.zabbix.com + Pin-Priority: {{ zabbix_agent_apt_priority | int }} + owner: root + mode: "0644" + when: + - zabbix_agent_apt_priority | int + become: true + tags: + - install + +- name: "Debian | Installing zabbix-agent" + ansible.builtin.apt: + pkg: "{{ 
zabbix_agent_package }}" + state: "{{ zabbix_agent_package_state }}" + update_cache: true + cache_valid_time: 0 + force_apt_get: "{{ zabbix_apt_force_apt_get }}" + install_recommends: "{{ zabbix_apt_install_recommends }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + tags: + - install + +- name: "Debian | Installing zabbix-{sender,get}" + ansible.builtin.apt: + pkg: + - "{{ zabbix_sender_package }}" + - "{{ zabbix_get_package }}" + state: "{{ zabbix_agent_package_state }}" + update_cache: true + cache_valid_time: 0 + force_apt_get: "{{ zabbix_apt_force_apt_get }}" + install_recommends: "{{ zabbix_apt_install_recommends }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + when: + - not zabbix_agent_install_agent_only + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + check_mode: false + tags: + - install + +- name: "Debian | Enable the service" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + enabled: true + use: service + become: true + tags: + - service diff --git a/ansible/01_old/roles/zabbix-agent/tasks/Docker.yml b/ansible/01_old/roles/zabbix-agent/tasks/Docker.yml new file mode 100644 index 0000000..90656fc --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/Docker.yml @@ -0,0 +1,32 @@ +--- +- name: "Create volume mount string" + ansible.builtin.set_fact: + volume_mount: "{{ zabbix_agent_tlspskfile }}:/var/lib/zabbix/enc/tlspskfile" + tls_key: + ZBX_TLSPSKFILE: tlspskfile + when: + - zabbix_agent_tlspskfile is defined + +- name: "Add zabbix_agent_tlspskfile to volume mount" + ansible.builtin.set_fact: + zabbix_agent_docker_volumes: "{{ 
zabbix_agent_docker_volumes + [ volume_mount ] }}" + zabbix_agent_docker_env: "{{ zabbix_agent_docker_env | combine(tls_key) }}" + when: + - zabbix_agent_tlspskfile is defined + +- name: "Ensure Zabbix Docker container is running" + community.docker.docker_container: + name: "{{ zabbix_agent_docker_name }}" + image: "{{ zabbix_agent_docker_image }}:{{ zabbix_agent_docker_image_tag }}" + state: "{{ zabbix_agent_docker_state }}" + restart_policy: "{{ zabbix_agent_docker_restart_policy }}" + network_mode: "{{ zabbix_agent_docker_network_mode }}" + published_ports: "{{ zabbix_agent_docker_ports }}" + privileged: "{{ zabbix_agent_docker_privileged }}" + security_opts: "{{ zabbix_agent_docker_security_opts }}" + volumes: "{{ zabbix_agent_docker_volumes }}" + env: "{{ zabbix_agent_docker_env }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + become: true diff --git a/ansible/01_old/roles/zabbix-agent/tasks/Linux.yml b/ansible/01_old/roles/zabbix-agent/tasks/Linux.yml new file mode 100644 index 0000000..de06858 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/Linux.yml @@ -0,0 +1,239 @@ +--- +- name: "Set default ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4'].address }}" + when: + - zabbix_agent_ip is not defined + - "'ansible_default_ipv4' in hostvars[inventory_hostname]" + tags: + - config + +- name: "Get Total Private IP Addresses" + ansible.builtin.set_fact: + total_private_ip_addresses: "{{ ansible_all_ipv4_addresses | ansible.utils.ipaddr('private') | length }}" + when: + - ansible_all_ipv4_addresses is defined + - not (zabbix_agent_dont_detect_ip) + tags: + - config + +- name: "Set first public ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ ansible_all_ipv4_addresses | ansible.netcommon.ipaddr('public') | first }}" + 
zabbix_agent_server: "{{ zabbix_agent_server_public_ip | default(zabbix_agent_server) }}" + zabbix_agent_serveractive: "{{ zabbix_agent_serveractive_public_ip | default(zabbix_agent_serveractive) }}" + zabbix_agent2_server: "{{ zabbix_agent_server_public_ip | default(zabbix_agent2_server) }}" + zabbix_agent2_serveractive: "{{ zabbix_agent_serveractive_public_ip | default(zabbix_agent2_serveractive) }}" + when: + - zabbix_agent_ip is not defined + - total_private_ip_addresses is defined + - total_private_ip_addresses == '0' + tags: + - config + +- name: "Set first private ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ ansible_all_ipv4_addresses | ansible.netcommon.ipaddr('private') | first }}" + when: + - zabbix_agent_ip is not defined + - total_private_ip_addresses is defined + - total_private_ip_addresses != '0' + tags: + - config + +- name: "Fail invalid specified agent_listeninterface" + ansible.builtin.fail: + msg: "The specified network interface does not exist" + when: + - (zabbix_agent_listeninterface) + - (zabbix_agent_listeninterface not in ansible_interfaces) + tags: + - config + +- name: "Set network interface" + ansible.builtin.set_fact: + network_interface: ansible_{{ zabbix_agent_listeninterface }} + when: + - (zabbix_agent_listeninterface) + - not zabbix_agent_listenip + tags: + - config + +- name: "Get IP of agent_listeninterface when no agent_listenip specified" + ansible.builtin.set_fact: + zabbix_agent_listenip: "{{ hostvars[inventory_hostname][network_interface]['ipv4'].address | default('0.0.0.0') }}" + when: + - (zabbix_agent_listeninterface) + - not zabbix_agent_listenip + tags: + - config + - api + +- name: "Default agent_listenip to all when not specified" + ansible.builtin.set_fact: + zabbix_agent_listenip: "0.0.0.0" + when: + - not (zabbix_agent_listenip) + tags: + - config + +- name: "Fail invalid specified agent_listenip" + ansible.builtin.fail: + msg: "The agent_listenip does not exist" + when: + - 
zabbix_agent_listenip != '0.0.0.0' + - zabbix_agent_listenip != '127.0.0.1' + - (zabbix_agent_listenip not in ansible_all_ipv4_addresses) + tags: + - config + +- name: "Configure SELinux when enabled" + ansible.builtin.include_tasks: selinux.yml + when: + - zabbix_selinux | bool + +- name: "Adding zabbix group" + ansible.builtin.group: + name: zabbix + state: present + gid: "{{ zabbix_agent_docker_user_gid | default(omit) }}" + become: true + when: + - zabbix_agent_docker | bool + tags: + - config + +- name: "Adding zabbix user" + ansible.builtin.user: + name: zabbix + group: zabbix + state: present + create_home: false + home: /etc/zabbix + uid: "{{ zabbix_agent_docker_user_uid | default(omit) }}" + system: true + become: true + when: + - zabbix_agent_docker | bool + tags: + - config + +- name: "Configure zabbix-agent" + ansible.builtin.template: + src: "{{ 'zabbix_agentd.conf.j2' if not zabbix_agent2 else 'zabbix_agent2.conf.j2' }}" + dest: "/etc/zabbix/{{ zabbix_agent_conf if not zabbix_agent2 else zabbix_agent2_conf }}" + owner: root + group: root + mode: "{{ zabbix_agent_conf_mode }}" + notify: + - restart zabbix-agent + become: true + when: + - not (zabbix_agent_docker | bool) + tags: + - config + +- name: "Create directory for PSK file if not exist." 
+ ansible.builtin.file: + path: "{{ zabbix_agent_tlspskfile | dirname }}" + mode: 0755 + state: directory + become: true + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - not (zabbix_agent2 | bool) + tags: + - config + +- name: "Create directory for PSK file if not exist (zabbix-agent2)" + ansible.builtin.file: + path: "{{ zabbix_agent2_tlspskfile | dirname }}" + mode: 0755 + state: directory + become: true + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - zabbix_agent2 | bool + tags: + - config + +- name: "Place TLS PSK File" + ansible.builtin.copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - zabbix_agent_tlspsk_secret is defined + - not (zabbix_agent2 | bool) + notify: + - restart zabbix-agent + tags: + - config + +- name: "Place TLS PSK File (zabbix-agent2)" + ansible.builtin.copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - zabbix_agent2_tlspsk_secret is defined + - zabbix_agent2 | bool + notify: + - restart zabbix-agent + tags: + - config + +- name: "Create include dir zabbix-agent" + ansible.builtin.file: + path: "{{ zabbix_agent_include if not zabbix_agent2 else zabbix_agent2_include }}" + owner: root + group: zabbix + mode: "{{ zabbix_agent_include_mode if not zabbix_agent2 else zabbix_agent2_include_mode }}" + state: directory + become: true + tags: + - config + +- 
name: "Install the Docker container" + ansible.builtin.include_tasks: Docker.yml + when: + - zabbix_agent_docker | bool + +- name: "Remove zabbix-agent installation when zabbix-agent2 is used." + ansible.builtin.include_tasks: remove.yml + when: + - zabbix_agent2 | bool + - zabbix_agent_package_remove + +- name: "Make sure the zabbix-agent service is running" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + state: started + enabled: true + become: true + when: + - not (zabbix_agent_docker | bool) + tags: + - service + +- name: "Give zabbix-agent access to system.hw.chassis info" + ansible.builtin.file: + path: /sys/firmware/dmi/tables/DMI + owner: root + group: zabbix + become: true + when: zabbix_agent_chassis | bool + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/RedHat.yml b/ansible/01_old/roles/zabbix-agent/tasks/RedHat.yml new file mode 100644 index 0000000..c85d8e5 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/RedHat.yml @@ -0,0 +1,70 @@ +--- +# Tasks specific for RedHat systems + +- name: "RedHat | Install basic repo file" + ansible.builtin.yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + gpgcheck: "{{ item.gpgcheck }}" + gpgkey: "{{ item.gpgkey }}" + mode: "{{ item.mode | default('0644') }}" + priority: "{{ item.priority | default('99') }}" + state: "{{ item.state | default('present') }}" + proxy: "{{ zabbix_http_proxy | default(omit) }}" + with_items: "{{ zabbix_repo_yum }}" + register: yum_repo_installed + become: true + notify: + - "clean repo files from proxy creds" + tags: + - install + +- name: Check if warn parameter can be used for shell module + ansible.builtin.set_fact: + produce_warn: False + when: ansible_version.full is version("2.14", "<") + tags: + - always + +- name: "RedHat | Installing zabbix-agent" + ansible.builtin.package: + pkg: + - "{{ zabbix_agent_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor 
}}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + tags: + - install + +- name: "RedHat | Installing zabbix-{sender,get}" + ansible.builtin.package: + pkg: + - "{{ zabbix_sender_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + - "{{ zabbix_get_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + when: + - not zabbix_agent_install_agent_only + become: true + tags: + - install + +- name: "RedHat | Enable the service" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + enabled: true + use: service + become: true + tags: + - service diff --git a/ansible/01_old/roles/zabbix-agent/tasks/Windows.yml b/ansible/01_old/roles/zabbix-agent/tasks/Windows.yml new file mode 100644 index 0000000..177db17 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/Windows.yml @@ -0,0 +1,352 @@ +--- +- name: "Windows | Set default architecture" + ansible.builtin.set_fact: + windows_arch: 32 + tags: + - always + +- name: "Windows | Override architecture if 64-bit" + ansible.builtin.set_fact: + windows_arch: 64 + when: + - ansible_architecture == "64-bit" + tags: + - always + +- name: "Windows | Set path to zabbix.exe" + ansible.builtin.set_fact: + zabbix_win_exe_path: '{{ zabbix_win_install_dir }}\bin\win{{ windows_arch 
}}\zabbix_agentd.exe' + tags: + - always + +- name: "Windows | Set variables specific to Zabbix" + ansible.builtin.set_fact: + zabbix_win_svc_name: Zabbix Agent + zabbix_win_exe_path: '{{ zabbix_win_install_dir }}\bin\zabbix_agentd.exe' + zabbix_win_config_name: "zabbix_agentd.conf" + zabbix2_win_svc_name: Zabbix Agent 2 + zabbix2_win_exe_path: '{{ zabbix_win_install_dir }}\bin\zabbix_agent2.exe' + zabbix2_win_config_name: "zabbix_agent2.conf" + tags: + - always + +- name: "Windows | Check if Zabbix agent is present" + ansible.windows.win_stat: + path: "{{ item }}" + with_items: + - "{{ zabbix_win_exe_path }}" + - "{{ zabbix2_win_exe_path }}" + register: agent_file_info + tags: + - always + +- name: "Windows | Get Installed Zabbix Agent Version" + community.windows.win_file_version: + path: "{{ item.item }}" + register: zabbix_win_exe_info + when: + - item.stat.exists | bool + with_items: "{{ agent_file_info.results }}" + tags: + - always + +- name: "Windows | Set facts current zabbix agent installation" + ansible.builtin.set_fact: + zabbix_agent_1_binary_exist: true + zabbix_agent_1_version: zabbix_win_exe_info.results[0].win_file_version.product_version + when: + - zabbix_win_exe_info.results[0] is defined + - zabbix_win_exe_info.results[0].item.stat.exists + - zabbix_win_exe_info.results[0].item.stat.path == zabbix_win_exe_path + - zabbix_win_exe_info.results[0].win_file_version.product_version + tags: + - always + +- name: "Windows | Set facts current zabbix agent installation (agent 2)" + ansible.builtin.set_fact: + zabbix_agent_2_binary_exist: true + zabbix_agent_2_version: zabbix_win_exe_info.results[1].win_file_version.product_version + when: + - zabbix_win_exe_info.results[1] is defined + - zabbix_win_exe_info.results[1].item.stat.exists + - zabbix_win_exe_info.results[1].item.stat.path == zabbix2_win_exe_path + - zabbix_win_exe_info.results[1].win_file_version.product_version + tags: + - always + +- name: "Windows | Check Zabbix service" + 
ansible.windows.win_service: + name: "{{ (item.item.stat.path == zabbix_win_exe_path ) | ternary(zabbix_win_svc_name,zabbix2_win_svc_name) }}" + register: zabbix_service_info + when: item.item.stat.exists + with_items: "{{ zabbix_win_exe_info.results }}" + tags: + - always + +- name: "Windows | Set facts about current zabbix agent service state" + ansible.builtin.set_fact: + zabbix_agent_1_service_exist: true + when: + - zabbix_service_info.results[0].exists is defined + - zabbix_service_info.results[0].exists + - zabbix_service_info.results[0].display_name == zabbix_win_svc_name + tags: + - always + +- name: "Windows | Set facts about current zabbix agent service state (agent 2)" + ansible.builtin.set_fact: + zabbix_agent_2_service_exist: true + when: + - zabbix_service_info.results[1].exists is defined + - zabbix_service_info.results[1].exists + - zabbix_service_info.results[1].display_name == zabbix2_win_svc_name + tags: + - always + +- name: "Windows | Set fact about version change requirement" + ansible.builtin.set_fact: + zabbix_agent_version_change: true + when: > + (zabbix_agent_1_binary_exist | default(false) and + zabbix_win_exe_info.results[0].win_file_version.product_version is version(zabbix_version_long, '<>')) + or + (zabbix_agent_2_binary_exist | default(false) and + zabbix_win_exe_info.results[1].win_file_version.product_version is version(zabbix_version_long, '<>')) + or (zabbix_agent_1_binary_exist | default(false) and zabbix_agent2) + or (zabbix_agent_2_binary_exist | default(false) and not zabbix_agent2) + tags: + - always + +################## +# delete section # +################## + +- name: "Windows | Stop Zabbix agent v1" + ansible.windows.win_service: + name: "{{ zabbix_win_svc_name }}" + start_mode: auto + state: stopped + when: + - zabbix_agent_version_change | default(false) or zabbix_agent2 + - zabbix_agent_1_service_exist | default(false) + +- name: "Windows | Stop Zabbix agent v2" + ansible.windows.win_service: + name: "{{ 
zabbix2_win_svc_name }}" + start_mode: auto + state: stopped + when: + - zabbix_agent_version_change | default(false) or not zabbix_agent2 + - zabbix_agent_2_service_exist | default(false) + +- name: "Windows | Uninstall Zabbix v1" + ansible.windows.win_command: '"{{ zabbix_win_exe_path }}" --config "{{ zabbix_win_install_dir_conf }}\{{ zabbix_win_config_name }}" --uninstall' + when: + - zabbix_agent_version_change | default(false) or zabbix_agent2 + - zabbix_agent_1_service_exist | default(false) + +- name: "Windows | Uninstall Zabbix v2" + ansible.windows.win_command: '"{{ zabbix2_win_exe_path }}" --config "{{ zabbix_win_install_dir_conf }}\{{ zabbix2_win_config_name }}" --uninstall' + when: + - zabbix_agent_version_change | default(false) or not zabbix_agent2 + - zabbix_agent_2_service_exist | default(false) + +- name: "Windows | Removing Zabbix Directory" + ansible.windows.win_file: + path: "{{ zabbix_win_install_dir }}" + state: absent + when: + ((zabbix_agent_version_change | default(false) or zabbix_agent2) and zabbix_agent_1_binary_exist | default(false)) or + ((zabbix_agent_version_change | default(false) or not zabbix_agent2) and zabbix_agent_2_binary_exist | default(false)) + +################### +# install section # +################### + +- name: "Windows | Create directory structure" + ansible.windows.win_file: + path: "{{ item }}" + state: directory + with_items: + - "{{ zabbix_win_install_dir }}" + tags: + - install + +- name: "Windows | Create directory structure, includes" + ansible.windows.win_file: + path: "{{ item }}" + state: directory + with_items: + - "{{ zabbix_agent_win_include }}" + when: + - ('.conf' not in zabbix_agent_win_include) + tags: + - install + +- name: "Windows | Set installation settings (agent 2)" + ansible.builtin.set_fact: + zabbix_win_package: "{{ zabbix2_win_package }}" + zabbix_win_download_link: "{{ zabbix2_win_download_link }}" + zabbix_win_exe_path: "{{ zabbix2_win_exe_path }}" + zabbix_win_config_name: "{{ 
zabbix2_win_config_name }}" + zabbix_win_svc_name: "{{ zabbix2_win_svc_name }}" + when: zabbix_agent2 | bool + tags: + - install + +- name: "Windows | Check if agent file is already downloaded" + ansible.windows.win_stat: + path: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + register: file_info + tags: + - install + +- name: "Windows | Check if agent binaries in place" + ansible.windows.win_stat: + path: "{{ zabbix_win_exe_path }}" + register: zabbix_windows_binaries + tags: + - install + +- name: "Windows | Download Zabbix Agent Zip file" + ansible.windows.win_get_url: + url: "{{ zabbix_win_download_link }}" + dest: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + url_username: "{{ zabbix_download_user | default(omit) }}" + url_password: "{{ zabbix_download_pass | default(omit) }}" + force: false + follow_redirects: all + proxy_url: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + validate_certs: "{{ zabbix_download_validate_certs | default(False) | bool }}" + timeout: "{{ zabbix_download_timeout | default(120) | int }}" + when: + - not file_info.stat.exists + - not zabbix_windows_binaries.stat.exists + register: zabbix_agent_win_download_zip + until: zabbix_agent_win_download_zip is succeeded + throttle: "{{ zabbix_download_throttle | default(5) | int }}" + tags: + - install + +- name: "Windows | Unzip file" + community.windows.win_unzip: + src: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + dest: "{{ zabbix_win_install_dir }}" + creates: "{{ zabbix_win_exe_path }}" + tags: + - install + +- name: "Windows | Cleanup downloaded Zabbix Agent Zip file" + ansible.windows.win_file: + path: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + state: absent + when: + - zabbix_agent_win_download_zip.changed + tags: + - install + +- name: "Windows | Copy binary files to expected location" + ansible.windows.win_copy: + src: "{{ zabbix_win_install_dir }}\\bin\\{{ item }}" + dest: "{{ zabbix_win_install_dir_bin }}\\{{ item }}" 
+ remote_src: yes + loop: + - zabbix_agentd.exe + - zabbix_sender.exe + when: + - zabbix_win_install_dir_bin is defined + - not (zabbix_agent2 | bool) + tags: + - install + +- name: "Windows | Copy binary files to expected location (zabbix-agent2)" + ansible.windows.win_copy: + src: "{{ zabbix_win_install_dir }}\\bin\\{{ item }}" + dest: "{{ zabbix_win_install_dir_bin }}\\{{ item }}" + remote_src: yes + loop: + - zabbix_agent2.exe + when: + - zabbix_win_install_dir_bin is defined + - zabbix_agent2 | bool + tags: + - install + +- set_fact: + zabbix_win_exe_path: "{{ zabbix_win_install_dir_bin }}\\zabbix_agentd.exe" + when: + - zabbix_win_install_dir_bin is defined + - not (zabbix_agent2 | bool) + tags: + - install + +- set_fact: + zabbix_win_exe_path: "{{ zabbix_win_install_dir_bin }}\\zabbix_agent2.exe" + when: + - zabbix_win_install_dir_bin is defined + - zabbix_agent2 | bool + tags: + - install + +- name: "Create directory for PSK file if not exist." + ansible.windows.win_file: + path: "{{ zabbix_agent_tlspskfile | win_dirname }}" + state: directory + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile + - not (zabbix_agent2 | bool) + tags: + - config + +- name: "Create directory for PSK file if not exist (zabbix-agent2)" + ansible.windows.win_file: + path: "{{ zabbix_agent2_tlspskfile | win_dirname }}" + state: directory + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile + - zabbix_agent2 | bool + tags: + - config + +- name: "Place TLS PSK File" + ansible.windows.win_copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile + - zabbix_agent_tlspsk_secret is defined + - not (zabbix_agent2 | bool) + notify: + - restart win zabbix agent + tags: + - config + +- name: "Place TLS PSK File (zabbix-agent2)" + ansible.windows.win_copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret 
}}" + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile + - zabbix_agent2_tlspsk_secret is defined + - zabbix_agent2 | bool + notify: + - restart win zabbix agent + tags: + - config + +- name: "Windows | Check if windows service exist" + ansible.windows.win_service: + name: "{{ zabbix_win_svc_name }}" + register: zabbix_windows_service + tags: + - service + +- name: "Windows | Register Service" + ansible.windows.win_command: '"{{ zabbix_win_exe_path }}" --config "{{ zabbix_win_install_dir_conf }}\{{ zabbix_win_config_name }}" --install' + when: not zabbix_windows_service.exists + tags: + - service diff --git a/ansible/01_old/roles/zabbix-agent/tasks/Windows_conf.yml b/ansible/01_old/roles/zabbix-agent/tasks/Windows_conf.yml new file mode 100644 index 0000000..c59e3bc --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/Windows_conf.yml @@ -0,0 +1,56 @@ +--- +- name: "Set default ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ hostvars[inventory_hostname]['ansible_ip_addresses'] | ansible.utils.ipv4 | first }}" + when: + - zabbix_agent_ip is not defined + - "'ansible_ip_addresses' in hostvars[inventory_hostname]" + tags: + - config + +- name: "Windows | Configure zabbix-agent" + ansible.windows.win_template: + src: "{{ zabbix_win_config_name }}.j2" + dest: "{{ zabbix_win_install_dir_conf }}\\{{ zabbix_win_config_name }}" + notify: restart win zabbix agent + tags: + - config + +- name: "Windows | Set service startup mode to auto, ensure it is started and set auto-recovery" + ansible.windows.win_service: + name: "{{ zabbix_win_svc_name }}" + start_mode: auto + state: started + failure_actions: + - type: restart + delay_ms: 5000 + - type: restart + delay_ms: 10000 + - type: restart + delay_ms: 20000 + failure_reset_period_sec: 86400 + tags: + - config + +- name: "Windows | Check firewall service" + ansible.windows.win_service_info: + name: MpsSvc + register: firewall_info + when: 
zabbix_win_firewall_management + tags: + - config + +- name: "Windows | Firewall rule" + community.windows.win_firewall_rule: + name: "{{ zabbix_win_svc_name }}" + localport: "{{ zabbix_agent_listenport }}" + action: allow + direction: in + protocol: tcp + state: present + enabled: true + when: + - zabbix_win_firewall_management + - firewall_info.services[0].state == 'started' or firewall_info.services[0].start_mode == 'auto' + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/XCP-ng.yml b/ansible/01_old/roles/zabbix-agent/tasks/XCP-ng.yml new file mode 100644 index 0000000..c85d8e5 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/XCP-ng.yml @@ -0,0 +1,70 @@ +--- +# Tasks specific for RedHat systems + +- name: "RedHat | Install basic repo file" + ansible.builtin.yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + gpgcheck: "{{ item.gpgcheck }}" + gpgkey: "{{ item.gpgkey }}" + mode: "{{ item.mode | default('0644') }}" + priority: "{{ item.priority | default('99') }}" + state: "{{ item.state | default('present') }}" + proxy: "{{ zabbix_http_proxy | default(omit) }}" + with_items: "{{ zabbix_repo_yum }}" + register: yum_repo_installed + become: true + notify: + - "clean repo files from proxy creds" + tags: + - install + +- name: Check if warn parameter can be used for shell module + ansible.builtin.set_fact: + produce_warn: False + when: ansible_version.full is version("2.14", "<") + tags: + - always + +- name: "RedHat | Installing zabbix-agent" + ansible.builtin.package: + pkg: + - "{{ zabbix_agent_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: 
zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + tags: + - install + +- name: "RedHat | Installing zabbix-{sender,get}" + ansible.builtin.package: + pkg: + - "{{ zabbix_sender_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + - "{{ zabbix_get_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + when: + - not zabbix_agent_install_agent_only + become: true + tags: + - install + +- name: "RedHat | Enable the service" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + enabled: true + use: service + become: true + tags: + - service diff --git a/ansible/01_old/roles/zabbix-agent/tasks/api.yml b/ansible/01_old/roles/zabbix-agent/tasks/api.yml new file mode 100644 index 0000000..3487971 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/api.yml @@ -0,0 +1,96 @@ +--- +- name: "API | Create host groups" + community.zabbix.zabbix_group: + host_group: "{{ zabbix_host_groups }}" + state: "{{ zabbix_agent_hostgroups_state }}" + when: + - zabbix_api_create_hostgroup | bool + register: zabbix_api_hostgroup_created + until: zabbix_api_hostgroup_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + tags: + - api + +- name: "API | Create a new host or update an existing host's info" + community.zabbix.zabbix_host: + host_name: "{{ zabbix_agent_hostname }}" + host_groups: "{{ zabbix_host_groups }}" + link_templates: "{{ zabbix_agent_link_templates }}" + status: "{{ zabbix_host_status }}" + state: "{{ zabbix_agent_host_state }}" + force: "{{ zabbix_agent_host_update }}" + proxy: "{{ 
zabbix_agent_proxy }}" + inventory_mode: "{{ zabbix_agent_inventory_mode }}" + interfaces: "{{ zabbix_agent_interfaces }}" + visible_name: "{{ zabbix_agent_visible_hostname | default(zabbix_agent_hostname) }}" + tls_psk: "{{ zabbix_agent_tlspsk_secret | default(omit) }}" + tls_psk_identity: "{{ zabbix_agent_tlspskidentity | default(omit) }}" + tls_issuer: "{{ zabbix_agent_tlsservercertissuer | default(omit) }}" + tls_subject: "{{ zabbix_agent_tls_subject | default(omit) }}" + tls_accept: "{{ zabbix_agent_tls_config[zabbix_agent_tlsaccept if zabbix_agent_tlsaccept else 'unencrypted'] }}" + tls_connect: "{{ zabbix_agent_tls_config[zabbix_agent_tlsconnect if zabbix_agent_tlsconnect else 'unencrypted'] }}" + description: "{{ zabbix_agent_description | default(omit) }}" + inventory_zabbix: "{{ zabbix_agent_inventory_zabbix | default({}) }}" + ipmi_authtype: "{{ zabbix_agent_ipmi_authtype | default(omit) }}" + ipmi_password: "{{ zabbix_agent_ipmi_password| default(omit) }}" + ipmi_privilege: "{{ zabbix_agent_ipmi_privilege | default(omit) }}" + ipmi_username: "{{ zabbix_agent_ipmi_username | default(omit) }}" + tags: "{{ zabbix_agent_tags }}" + when: + - not zabbix_agent2 + register: zabbix_api_host_created + until: zabbix_api_host_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + changed_when: false + tags: + - api + +- name: "API | Create a new host using agent2 or update an existing host's info" + community.zabbix.zabbix_host: + host_name: "{{ zabbix_agent2_hostname }}" + host_groups: "{{ zabbix_host_groups }}" + link_templates: "{{ zabbix_agent_link_templates }}" + status: "{{ zabbix_host_status }}" + state: "{{ zabbix_agent_host_state }}" + force: "{{ zabbix_agent_host_update }}" + proxy: "{{ zabbix_agent_proxy }}" + inventory_mode: "{{ zabbix_agent_inventory_mode }}" + interfaces: "{{ zabbix_agent_interfaces }}" + visible_name: "{{ zabbix_agent_visible_hostname | default(zabbix_agent2_hostname) }}" + tls_psk: "{{ zabbix_agent2_tlspsk_secret | 
default(omit) }}" + tls_psk_identity: "{{ zabbix_agent2_tlspskidentity | default(omit) }}" + tls_issuer: "{{ zabbix_agent2_tlsservercertissuer | default(omit) }}" + tls_subject: "{{ zabbix_agent2_tls_subject | default(omit) }}" + tls_accept: "{{ zabbix_agent_tls_config[zabbix_agent2_tlsaccept if zabbix_agent2_tlsaccept else 'unencrypted'] }}" + tls_connect: "{{ zabbix_agent_tls_config[zabbix_agent2_tlsconnect if zabbix_agent2_tlsconnect else 'unencrypted'] }}" + description: "{{ zabbix_agent_description | default(omit) }}" + inventory_zabbix: "{{ zabbix_agent_inventory_zabbix | default({}) }}" + ipmi_authtype: "{{ zabbix_agent_ipmi_authtype | default(omit) }}" + ipmi_password: "{{ zabbix_agent_ipmi_password| default(omit) }}" + ipmi_privilege: "{{ zabbix_agent_ipmi_privilege | default(omit) }}" + ipmi_username: "{{ zabbix_agent_ipmi_username | default(omit) }}" + tags: "{{ zabbix_agent_tags }}" + when: + - zabbix_agent2 | bool + register: zabbix_api_host_created + until: zabbix_api_host_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + changed_when: false + tags: + - api + +- name: "API | Updating host configuration with macros" + community.zabbix.zabbix_hostmacro: + host_name: "{{ (zabbix_agent2 | bool) | ternary(zabbix_agent2_hostname, zabbix_agent_hostname) }}" + macro_name: "{{ item.macro_key }}" + macro_value: "{{ item.macro_value }}" + macro_type: "{{ item.macro_type|default('text') }}" + with_items: "{{ zabbix_agent_macros | default([]) }}" + when: + - zabbix_agent_macros is defined + - item.macro_key is defined + register: zabbix_api_hostmarcro_created + until: zabbix_api_hostmarcro_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + tags: + - api diff --git a/ansible/01_old/roles/zabbix-agent/tasks/macOS.yml b/ansible/01_old/roles/zabbix-agent/tasks/macOS.yml new file mode 100644 index 0000000..f8fd97b --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/macOS.yml @@ -0,0 +1,22 @@ +--- +# Tasks specific for macOS 
+- name: "macOS | Check installed package version" + ansible.builtin.shell: | + set -o pipefail + pkgutil --pkg-info 'com.zabbix.pkg.ZabbixAgent' | grep 'version:' | cut -d ' ' -f 2 + register: pkgutil_version + check_mode: false + changed_when: false + failed_when: pkgutil_version.rc == 2 + +- name: "macOS | Download the Zabbix package" + ansible.builtin.get_url: + url: "{{ zabbix_mac_download_link }}" + dest: "/tmp/{{ zabbix_mac_package }}" + mode: 0644 + when: pkgutil_version.stdout != zabbix_version_long + +- name: "macOS | Install the Zabbix package" + ansible.builtin.command: installer -pkg "/tmp/{{ zabbix_mac_package }}" -target / + become: true + when: pkgutil_version.stdout != zabbix_version_long diff --git a/ansible/01_old/roles/zabbix-agent/tasks/main.yml b/ansible/01_old/roles/zabbix-agent/tasks/main.yml new file mode 100644 index 0000000..5b12ec6 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/main.yml @@ -0,0 +1,94 @@ +--- +# tasks file for zabbix_agent +- name: "Include OS-specific variables" + ansible.builtin.include_vars: "{{ ansible_os_family }}.yml" + tags: + - always + +- name: Determine Latest Supported Zabbix Version + ansible.builtin.set_fact: + zabbix_agent_version: "{{ zabbix_valid_agent_versions[ansible_distribution_major_version][0] | default(6.4) }}" + when: zabbix_agent_version is not defined or zabbix_agent_version is none + tags: + - always + +- name: Set More Variables + ansible.builtin.set_fact: + zabbix_valid_version: "{{ zabbix_agent_version|float in zabbix_valid_agent_versions[ansible_distribution_major_version] }}" + tags: + - always + +- name: Stopping Install of Invalid Version + ansible.builtin.fail: + msg: Zabbix version {{ zabbix_agent_version }} is not supported on {{ ansible_distribution }} {{ ansible_distribution_major_version }} + when: not zabbix_valid_version + tags: + - always + +- name: Setting Zabbix API Server Port + ansible.builtin.set_fact: + zabbix_api_server_port: "{{ '443' if 
zabbix_api_use_ssl|bool else '80' }}"
+  when: zabbix_api_server_port is undefined
+
+- name: "Set variables specific for Zabbix Agent 2"
+  ansible.builtin.set_fact:
+    zabbix_agent_service: zabbix-agent2
+    zabbix_agent_package: zabbix-agent2
+  when:
+    - zabbix_agent2 is defined
+    - zabbix_agent2
+  tags:
+    - always
+
+- name: "Install the correct repository"
+  ansible.builtin.include_tasks: "{{ ansible_os_family }}.yml"
+  when:
+    - not (zabbix_agent_docker | bool)
+
+- name: "Encrypt with TLS PSK auto management"
+  ansible.builtin.include_tasks: tlspsk_auto.yml
+  when:
+    - not zabbix_agent2
+    - zabbix_agent_tlspsk_auto | bool
+    - (zabbix_agent_tlspskfile is undefined) or (zabbix_agent_tlspskfile | length == 0)
+    - (zabbix_agent_tlspsk_secret is undefined) or (zabbix_agent_tlspsk_secret | length == 0)
+
+- name: "Encrypt with TLS PSK auto management"
+  ansible.builtin.include_tasks: tlspsk_auto_agent2.yml
+  when:
+    - zabbix_agent2 | bool
+    - zabbix_agent2_tlspsk_auto | bool
+    - (zabbix_agent2_tlspskfile is undefined) or (zabbix_agent2_tlspskfile | length == 0)
+    - (zabbix_agent2_tlspsk_secret is undefined) or (zabbix_agent2_tlspsk_secret | length == 0)
+
+- name: "Configure Agent"
+  ansible.builtin.include_tasks: Windows_conf.yml
+  when:
+    - ansible_os_family == "Windows"
+
+- name: "Configure Agent"
+  ansible.builtin.include_tasks: Linux.yml
+  when:
+    - (ansible_os_family != "Windows" and ansible_os_family != "Darwin") or (zabbix_agent_docker | bool)
+
+- name: "Run the API calls to Zabbix Server"
+  vars:
+    gather_facts: false
+    ansible_user: "{{ zabbix_api_login_user }}"
+    ansible_httpapi_use_ssl: "{{ zabbix_api_use_ssl }}"
+    ansible_network_os: community.zabbix.zabbix
+    ansible_connection: httpapi
+    # Can't think of a way to make http_login_* vars be undefined -(
+    http_login_user: "{{ zabbix_api_http_user | default(-42) }}"
+    http_login_password: "{{ zabbix_api_http_password | default(-42) }}"
+  ansible.builtin.include_tasks: api.yml
+  when:
+    - 
(zabbix_api_create_hostgroup | bool) or (zabbix_api_create_hosts | bool)
+  tags:
+    - api
+
+- name: "Including userparameters"
+  ansible.builtin.include_tasks: "userparameter.yml"
+  when: zabbix_agent_userparameters|length > 0
+  tags:
+    - config
diff --git a/ansible/01_old/roles/zabbix-agent/tasks/remove.yml b/ansible/01_old/roles/zabbix-agent/tasks/remove.yml
new file mode 100644
index 0000000..f825067
--- /dev/null
+++ b/ansible/01_old/roles/zabbix-agent/tasks/remove.yml
@@ -0,0 +1,25 @@
+---
+- name: Pull service facts
+  ansible.builtin.service_facts:
+
+- name: 'Remove | Make sure the "old" zabbix-agent service stopped'
+  ansible.builtin.service:
+    name: "zabbix-agent"
+    state: stopped
+    enabled: false
+  become: true
+  when: |
+    ansible_facts.services["zabbix-agent.service"] is defined or
+    ansible_facts.services["zabbix-agent"] is defined
+
+- name: "Remove | Package removal"
+  ansible.builtin.package:
+    name: "zabbix-agent"
+    state: absent
+  become: true
+
+- name: "Remove | Remove the agent-include-dir"
+  ansible.builtin.file:
+    path: "{{ zabbix_agent_include }}"
+    state: absent
+  become: true
diff --git a/ansible/01_old/roles/zabbix-agent/tasks/selinux.yml b/ansible/01_old/roles/zabbix-agent/tasks/selinux.yml
new file mode 100644
index 0000000..ca29e77
--- /dev/null
+++ b/ansible/01_old/roles/zabbix-agent/tasks/selinux.yml
@@ -0,0 +1,110 @@
+---
+- name: "SELinux | Debian | Install policycoreutils-python"
+  ansible.builtin.apt:
+    pkg: policycoreutils-python-utils
+    state: present
+    update_cache: true
+    cache_valid_time: 0
+    force_apt_get: "{{ zabbix_apt_force_apt_get }}"
+    install_recommends: "{{ zabbix_apt_install_recommends }}"
+  environment:
+    http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}"
+    https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}"
+  register: zabbix_agent_policycoreutils_installed
+  until: zabbix_agent_policycoreutils_installed is succeeded
+  become: true
+  when:
+    - ansible_os_family == "Debian"
+  tags:
+    - 
install + +- name: "SELinux | RedHat | Install policycoreutils-python" + ansible.builtin.package: + name: policycoreutils-python + state: installed + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_policycoreutils_installed + until: zabbix_agent_policycoreutils_installed is succeeded + when: + - ansible_os_family == "RedHat" + - (zabbix_agent_distribution_major_version == "6" or zabbix_agent_distribution_major_version == "7") + become: true + tags: + - install + +- name: "SELinux | RedHat | Install python3-policycoreutils on RHEL8" + ansible.builtin.package: + name: python3-policycoreutils + state: installed + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_policycoreutils_installed + until: zabbix_agent_policycoreutils_installed is succeeded + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version == "8" + become: true + tags: + - install + +- name: "SELinux | RedHat | Install selinux-policy-targeted" + ansible.builtin.package: + name: selinux-policy-targeted + state: installed + register: zabbix_agent_selinuxpolicytargeted_installed + until: zabbix_agent_selinuxpolicytargeted_installed is succeeded + when: + - ansible_os_family == "RedHat" + become: true + tags: + - install + +# straight to getenforce binary , workaround for missing python_selinux library +- name: "SELinux | Get getenforce binary" + ansible.builtin.stat: + path: /usr/sbin/getenforce + register: getenforce_bin + become: true + tags: + - always + +- name: "SELinux | Collect getenforce output" + ansible.builtin.command: /usr/sbin/getenforce + register: sestatus + when: "getenforce_bin.stat.exists" + changed_when: false + become: true + check_mode: false + tags: + - always + +- name: "SELinux | Set 
zabbix_selinux to true if getenforce returns Enforcing or Permissive" + ansible.builtin.set_fact: + zabbix_selinux: "{{ true }}" + when: + - 'getenforce_bin.stat.exists and ("Enforcing" in sestatus.stdout or "Permissive" in sestatus.stdout)' + tags: + - always + +- name: "SELinux | Allow zabbix_agent to start (SELinux)" + community.general.selinux_permissive: + name: zabbix_agent_t + permissive: true + become: true + tags: + - config + +- name: "SELinux | Allow zabbix to run sudo commands (SELinux)" + ansible.posix.seboolean: + name: zabbix_run_sudo + persistent: true + state: true + become: true + when: + - ansible_selinux.status == "enabled" + - selinux_allow_zabbix_run_sudo|bool + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto.yml new file mode 100644 index 0000000..6a1870e --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto.yml @@ -0,0 +1,14 @@ +--- +- ansible.builtin.include_tasks: tlspsk_auto_linux.yml + when: (ansible_os_family != "Windows") or (zabbix_agent_docker | bool) + +- ansible.builtin.include_tasks: tlspsk_auto_windows.yml + when: ansible_os_family == "Windows" + +- name: AutoPSK | Default tlsaccept and tlsconnect to enforce PSK + ansible.builtin.set_fact: + zabbix_agent_tlsaccept: psk + zabbix_agent_tlsconnect: psk + when: zabbix_api_create_hosts + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml new file mode 100644 index 0000000..6dc4ec6 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml @@ -0,0 +1,14 @@ +--- +- include_tasks: tlspsk_auto_agent2_linux.yml + when: (ansible_os_family != "Windows") or (zabbix_agent_docker | bool) + +- include_tasks: tlspsk_auto_agent2_windows.yml + when: ansible_os_family == "Windows" + +- name: AutoPSK | Default tlsaccept and tlsconnect to enforce PSK + 
ansible.builtin.set_fact: + zabbix_agent2_tlsaccept: psk + zabbix_agent2_tlsconnect: psk + when: zabbix_api_create_hosts + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml new file mode 100644 index 0000000..436eb42 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml @@ -0,0 +1,53 @@ +--- +# Process PSK Secret +- name: AutoPSK | Save existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent2_tlspsk_read: "{{ zabbix_agent2_tlspsk_base64['content'] | b64decode | trim }}" + when: zabbix_agent2_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Use existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent2_tlspsk_secret: "{{ zabbix_agent2_tlspsk_read }}" + when: + - zabbix_agent2_tlspskcheck.stat.exists + - zabbix_agent2_tlspsk_read|length >= 32 + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent2_tlspsk_secret: "{{ lookup('password', '/dev/null chars=hexdigits length=64') }}" + when: + - not zabbix_agent2_tlspskcheck.stat.exists + - (zabbix_agent2_tlspsk_read is not defined) or (zabbix_agent2_tlspsk_read|length < 32) + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +# Process PSK Identity +- name: AutoPSK | Use existing TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent2_tlspskidentity: "{{ zabbix_agent2_tlspskidentity_base64['content'] | b64decode | trim }}" + when: + - zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent2_tlspskidentity: >- + {{ + zabbix_agent_visible_hostname + | default(((zabbix_agent2 == True) | ternary(zabbix_agent2_hostname, zabbix_agent_hostname))) + + 
'_' + + lookup('password', '/dev/null chars=hexdigits length=4') + }} + when: not zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml new file mode 100644 index 0000000..98fa652 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml @@ -0,0 +1,80 @@ +--- +- name: AutoPSK | Set default path variables (Linux) + ansible.builtin.set_fact: + zabbix_agent2_tlspskfile: "/etc/zabbix/tls_psk_auto.secret" + zabbix_agent2_tlspskidentity_file: "/etc/zabbix/tls_psk_auto.identity" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspskcheck + become: true + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_check + become: true + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspsk_base64 + become: true + when: + - zabbix_agent2_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_base64 + become: true + when: zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- include_tasks: tlspsk_auto_agent2_common.yml + +- name: AutoPSK | Template TLS PSK identity in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent2_tlspskidentity_file }}" + content: "{{ zabbix_agent2_tlspskidentity }}" + owner: 
zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent2_tlspskidentity_file is defined + - zabbix_agent2_tlspskidentity is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspsk_secret is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml new file mode 100644 index 0000000..2549249 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml @@ -0,0 +1,66 @@ +--- +- name: AutoPSK | Set default path variables for Windows + ansible.builtin.set_fact: + zabbix_agent2_tlspskfile: "{{ zabbix_win_install_dir }}\\tls_psk_auto.secret.txt" + zabbix_agent2_tlspskidentity_file: "{{ zabbix_win_install_dir }}\\tls_psk_auto.identity.txt" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspskcheck + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_check + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspsk_base64 + when: + - zabbix_agent2_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Windows) + 
ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_base64 + when: zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- ansible.builtin.include_tasks: tlspsk_auto_agent2_common.yml + +- name: Windows | AutoPSK | Template TLS PSK identity in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent2_tlspskidentity_file }}" + content: "{{ zabbix_agent2_tlspskidentity }}" + when: + - zabbix_agent2_tlspskidentity_file is defined + - zabbix_agent2_tlspskidentity is defined + notify: + - restart win zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspsk_secret is defined + notify: + - restart win zabbix agent + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_common.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_common.yml new file mode 100644 index 0000000..a933692 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_common.yml @@ -0,0 +1,52 @@ +--- +# Process PSK Secret +- name: AutoPSK | Save existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent_tlspsk_read: "{{ zabbix_agent_tlspsk_base64['content'] | b64decode | trim }}" + when: zabbix_agent_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Use existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent_tlspsk_secret: "{{ zabbix_agent_tlspsk_read }}" + when: + - zabbix_agent_tlspskcheck.stat.exists + - zabbix_agent_tlspsk_read|length >= 32 + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent_tlspsk_secret: "{{ lookup('password', 
'/dev/null chars=hexdigits length=64') }}" + when: + - (not zabbix_agent_tlspskcheck.stat.exists) or (zabbix_agent_tlspsk_read|length < 32) + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +# Process PSK Identity +- name: AutoPSK | Use existing TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent_tlspskidentity: "{{ zabbix_agent_tlspskidentity_base64['content'] | b64decode | trim }}" + when: + - zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent_tlspskidentity: >- + {{ + zabbix_agent_visible_hostname + | default(((zabbix_agent2 != True) | ternary(zabbix_agent_hostname, zabbix_agent_hostname))) + + '_' + + lookup('password', '/dev/null chars=hexdigits length=4') + }} + when: not zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml new file mode 100644 index 0000000..906ccb0 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml @@ -0,0 +1,80 @@ +--- +- name: AutoPSK | Set default path variables (Linux) + ansible.builtin.set_fact: + zabbix_agent_tlspskfile: "/etc/zabbix/tls_psk_auto.secret" + zabbix_agent_tlspskidentity_file: "/etc/zabbix/tls_psk_auto.identity" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspskcheck + become: true + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_check + become: true + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Linux) + ansible.builtin.slurp: + src: "{{ 
zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspsk_base64 + become: true + when: + - zabbix_agent_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_base64 + become: true + when: zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- include_tasks: tlspsk_auto_common.yml + +- name: AutoPSK | Template TLS PSK identity in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent_tlspskidentity_file }}" + content: "{{ zabbix_agent_tlspskidentity }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent_tlspskidentity_file is defined + - zabbix_agent_tlspskidentity is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspsk_secret is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml new file mode 100644 index 0000000..db2aedf --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml @@ -0,0 +1,67 @@ +--- +- name: AutoPSK | Set default path variables for Windows + ansible.builtin.set_fact: + zabbix_agent_tlspskfile: "{{ zabbix_win_install_dir }}\\tls_psk_auto.secret.txt" + zabbix_agent_tlspskidentity_file: "{{ zabbix_win_install_dir }}\\tls_psk_auto.identity.txt" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK 
file (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspskcheck + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_check + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspsk_base64 + when: + - zabbix_agent_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_base64 + when: zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- include_tasks: tlspsk_auto_common.yml + +- name: AutoPSK | Template TLS PSK identity in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent_tlspskidentity_file }}" + content: "{{ zabbix_agent_tlspskidentity }}" + when: + - zabbix_agent_tlspskidentity_file is defined + - zabbix_agent_tlspskidentity is defined + notify: + - restart win zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspsk_secret is defined + - ansible_os_family == "Windows" + notify: + - restart win zabbix agent + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/tasks/userparameter.yml b/ansible/01_old/roles/zabbix-agent/tasks/userparameter.yml new file mode 100644 index 0000000..c683f9e --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/tasks/userparameter.yml @@ -0,0 +1,87 @@ +--- +- block: + - name: "Windows | Installing user-defined 
userparameters" + ansible.windows.win_template: + src: "{{ zabbix_agent_userparameters_templates_src }}/{{ item.name }}.j2" + dest: '{{ zabbix_agent_win_include }}\{{ item.name }}.conf' + notify: + - restart win zabbix agent + with_items: "{{ zabbix_agent_userparameters }}" + + - name: "Windows | Installing user-defined scripts" + ansible.windows.win_copy: + src: "{{ zabbix_agent_userparameters_scripts_src }}/{{ item.scripts_dir }}" + dest: '{{ zabbix_win_install_dir }}\scripts\' + notify: + - restart win zabbix agent + with_items: "{{ zabbix_agent_userparameters }}" + when: item.scripts_dir is defined + when: ansible_os_family == "Windows" + tags: + - config + +- block: + - name: "Installing user-defined userparameters" + ansible.builtin.template: + src: "{{ zabbix_agent_userparameters_templates_src }}/{{ item.name }}.j2" + dest: "{{ zabbix_agent_include }}/userparameter_{{ item.name }}.conf" + owner: zabbix + group: zabbix + mode: 0644 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + + - name: "Installing user-defined scripts" + ansible.builtin.copy: + src: "{{ zabbix_agent_userparameters_scripts_src }}/{{ item.scripts_dir }}" + dest: "/etc/zabbix/scripts/" + owner: zabbix + group: zabbix + mode: 0755 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + when: item.scripts_dir is defined + when: + - ansible_os_family != "Windows" + - not zabbix_agent2 + tags: + - config + +- block: + - name: "Installing user-defined userparameters" + ansible.builtin.template: + src: "{{ zabbix_agent_userparameters_templates_src }}/{{ item.name }}.j2" + dest: "{{ zabbix_agent2_include }}/userparameter_{{ item.name }}.conf" + owner: zabbix + group: zabbix + mode: 0644 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + + - name: "Installing 
user-defined scripts" + ansible.builtin.copy: + src: "{{ zabbix_agent_userparameters_scripts_src }}/{{ item.scripts_dir }}" + dest: "/etc/zabbix/scripts/" + owner: zabbix + group: zabbix + mode: 0755 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + when: item.scripts_dir is defined + when: + - ansible_os_family != "Windows" + - zabbix_agent2 + tags: + - config diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 new file mode 100644 index 0000000..517ff71 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 @@ -0,0 +1 @@ +UserParameter=dev2_iac_pass_failed,pam_tally2 -u dev2-iac | awk '/Failures/ {getline; print $2}' \ No newline at end of file diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 new file mode 100644 index 0000000..2e3c3f5 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 @@ -0,0 +1 @@ +UserParameter=dev2_pass_failed,pam_tally2 -u dev2 | awk '/Failures/ {getline; print $2}' \ No newline at end of file diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/mysql.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/mysql.j2 new file mode 100644 index 0000000..70df285 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/mysql.j2 @@ -0,0 +1,3 @@ +# This is an sample userparameters file. 
+ +UserParameter=mysql.ping_to,mysqladmin -uroot ping | grep -c alive diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 new file mode 100644 index 0000000..1526e1b --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 @@ -0,0 +1 @@ +UserParameter=root_pass_failed,pam_tally2 -u root | awk '/Failures/ {getline; print $2}' \ No newline at end of file diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/win_sample.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/win_sample.j2 new file mode 100644 index 0000000..c144e46 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/win_sample.j2 @@ -0,0 +1 @@ +UserParameter=do.something, powershell -NoProfile -ExecutionPolicy Bypass -File {{ zabbix_win_install_dir }}\scripts\{{ item.name }}\doSomething.ps1 diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/zombie.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/zombie.j2 new file mode 100644 index 0000000..055ec98 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/zombie.j2 @@ -0,0 +1 @@ +UserParameter=zombie.count,ps -ef | grep defunct | egrep -v grep | wc -l \ No newline at end of file diff --git a/ansible/01_old/roles/zabbix-agent/templates/userparameters/zombielist.j2 b/ansible/01_old/roles/zabbix-agent/templates/userparameters/zombielist.j2 new file mode 100644 index 0000000..ff14c89 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/userparameters/zombielist.j2 @@ -0,0 +1 @@ +UserParameter=zombie.list,ps -ef | grep defunct | egrep -v grep \ No newline at end of file diff --git a/ansible/01_old/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 b/ansible/01_old/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 new file mode 100644 index 0000000..bbdfd26 --- /dev/null +++ 
b/ansible/01_old/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 @@ -0,0 +1,140 @@ +{{ ansible_managed | comment }} +# This is a configuration file for Zabbix Agent 2 +# To get more information about Zabbix, visit http://www.zabbix.com + +# This configuration file is "minimalized", which means all the original comments +# are removed. The full documentation for your Zabbix Agent 2 can be found here: +# https://www.zabbix.com/documentation/{{ zabbix_agent_version }}/en/manual/appendix/config/zabbix_agent2{{ "_win" if ansible_os_family == "Windows" else "" }} + +{% if ansible_os_family != "Windows" %} +PidFile={{ zabbix_agent2_pidfile }} +{% endif %} +LogType={{ zabbix_agent2_logtype }} +{% if ansible_os_family == "Windows" %} +LogFile={{ zabbix_agent2_win_logfile }} +{% else %} +LogFile={{ zabbix_agent2_logfile }} +{% endif %} +LogFileSize={{ zabbix_agent2_logfilesize }} +DebugLevel={{ zabbix_agent2_debuglevel }} +{% if zabbix_agent2_sourceip is defined and zabbix_agent2_sourceip %} +SourceIP={{ zabbix_agent2_sourceip }} +{% endif %} +Server={{ zabbix_agent2_server }} +ListenPort={{ zabbix_agent2_listenport }} +{% if zabbix_agent2_listenip is defined and zabbix_agent2_listenip !='0.0.0.0' and zabbix_agent2_listenip %} +ListenIP={{ zabbix_agent2_listenip }} +{% endif %} +{% if zabbix_agent2_statusport is defined and zabbix_agent2_statusport %} +StatusPort={{ zabbix_agent2_statusport }} +{% endif %} +ServerActive={{ zabbix_agent2_serveractive }} +{% if zabbix_agent2_hostname is defined and zabbix_agent2_hostname %} +Hostname={{ zabbix_agent2_hostname }} +{% endif %} +{% if zabbix_agent2_hostnameitem is defined and zabbix_agent2_hostnameitem %} +HostnameItem={{ zabbix_agent2_hostnameitem }} +{% endif %} +{% if zabbix_agent2_hostmetadata is defined and zabbix_agent2_hostmetadata %} +HostMetadata={{ zabbix_agent2_hostmetadata }} +{% endif %} +{% if zabbix_agent2_hostmetadataitem is defined and zabbix_agent2_hostmetadataitem %} +HostMetadataItem={{ 
zabbix_agent2_hostmetadataitem }} +{% endif %} +{% if zabbix_agent2_hostinterface is defined and zabbix_agent2_hostinterface %} +HostInterface={{ zabbix_agent2_hostinterface }} +{% endif %} +{% if zabbix_agent2_hostinterfaceitem is defined and zabbix_agent2_hostinterfaceitem %} +HostInterfaceItem={{ zabbix_agent2_hostinterfaceitem }} +{% endif %} +{% if zabbix_agent2_allow_key is defined and zabbix_agent2_allow_key %} +{% for item in zabbix_agent2_allow_key %} +AllowKey={{ item }} +{% endfor %} +{% endif %} +{% if zabbix_agent2_deny_key is defined and zabbix_agent2_deny_key %} +{% for item in zabbix_agent2_deny_key %} +DenyKey={{ item }} +{% endfor %} +{% endif %} +RefreshActiveChecks={{ zabbix_agent2_refreshactivechecks }} +BufferSend={{ zabbix_agent2_buffersend }} +BufferSize={{ zabbix_agent2_buffersize }} +{% if zabbix_agent2_enablepersistentbuffer is defined and zabbix_agent2_enablepersistentbuffer %} +EnablePersistentBuffer={{ zabbix_agent2_enablepersistentbuffer }} +{% endif %} +{% if zabbix_agent2_persistentbufferperiod is defined and zabbix_agent2_persistentbufferperiod %} +PersistentBufferPeriod={{ zabbix_agent2_persistentbufferperiod }} +{% endif %} +{% if zabbix_agent2_persistentbufferfile is defined and zabbix_agent2_persistentbufferfile %} +PersistentBufferFile={{ zabbix_agent2_persistentbufferfile }} +{% endif %} +{% if zabbix_agent2_zabbix_alias is defined and zabbix_agent2_zabbix_alias %} +{% if zabbix_agent2_zabbix_alias is string %} +Alias={{ zabbix_agent2_zabbix_alias }} +{% else %} +{% for item in zabbix_agent2_zabbix_alias %} +Alias={{ item }} +{% endfor %} +{% endif %} +{% endif %} +Timeout={{ zabbix_agent2_timeout }} +{% if ansible_os_family == "Windows" %} +Include={{ zabbix_agent_win_include }} +{% else %} +Include={{ zabbix_agent2_include }}/{{ zabbix_agent2_include_pattern }} +{% endif %} +{% if zabbix_agent2_additional_include is defined and zabbix_agent2_additional_include is iterable and zabbix_agent2_additional_include is not string 
%} +{% for include in zabbix_agent2_additional_include %} +Include={{ include }} +{% endfor %} +{% endif %} +UnsafeUserParameters={{ zabbix_agent2_unsafeuserparameters }} +{% if ansible_os_family != "Windows" %} +ControlSocket={{ zabbix_agent2_controlsocket }} +{% endif %} +{% if zabbix_agent2_tlsconnect is defined and zabbix_agent2_tlsconnect %} +TLSConnect={{ zabbix_agent2_tlsconnect }} +{% endif %} +{% if zabbix_agent2_tlsaccept is defined and zabbix_agent2_tlsaccept %} +TLSAccept={{ zabbix_agent2_tlsaccept }} +{% endif %} +{% if zabbix_agent2_tlscafile is defined and zabbix_agent2_tlscafile %} +TLSCAFile={{ zabbix_agent2_tlscafile }} +{% endif %} +{% if zabbix_agent2_tlscrlfile is defined and zabbix_agent2_tlscrlfile %} +TLSCRLFile={{ zabbix_agent2_tlscrlfile }} +{% endif %} +{% if zabbix_agent2_tlsservercertissuer is defined and zabbix_agent2_tlsservercertissuer %} +TLSServerCertIssuer={{ zabbix_agent2_tlsservercertissuer }} +{% endif %} +{% if zabbix_agent2_tlsservercertsubject is defined and zabbix_agent2_tlsservercertsubject %} +TLSServerCertSubject={{ zabbix_agent2_tlsservercertsubject }} +{% endif %} +{% if zabbix_agent2_tlscertfile is defined and zabbix_agent2_tlscertfile %} +TLSCertFile={{ zabbix_agent2_tlscertfile }} +{% endif %} +{% if zabbix_agent2_tlskeyfile is defined and zabbix_agent2_tlskeyfile %} +TLSKeyFile={{ zabbix_agent2_tlskeyfile }} +{% endif %} +{% if zabbix_agent2_tlspskidentity is defined and zabbix_agent2_tlspskidentity %} +TLSPSKIdentity={{ zabbix_agent2_tlspskidentity }} +{% endif %} +{% if zabbix_agent2_tlspskfile is defined and zabbix_agent2_tlspskfile %} +TLSPSKFile={{ zabbix_agent2_tlspskfile }} +{% endif %} +{% if zabbix_agent2_plugins is defined and zabbix_agent2_plugins is iterable %} +{% for entry in zabbix_agent2_plugins %} +{% set my_name = entry['name'] %} +{% for property in entry['options'] %} +{% set param = property['parameter'] %} +{% set value = property['value'] %} +Plugins.{{ my_name }}.{{ param }}={{ value }} +{% 
endfor %} +{% endfor %} +{% endif %} +{% if zabbix_agent_version is version('6.0', '>=') %} +{% if zabbix_agent2_listenbacklog is defined and zabbix_agent2_listenbacklog %} +ListenBacklog={{ zabbix_agent2_listenbacklog }} +{% endif %} +{% endif %} diff --git a/ansible/01_old/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 b/ansible/01_old/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 new file mode 100644 index 0000000..85c8c84 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 @@ -0,0 +1,149 @@ +{{ ansible_managed | comment }} +# This is a configuration file for Zabbix Agent +# To get more information about Zabbix, visit http://www.zabbix.com + +# This configuration file is "minimalized", which means all the original comments +# are removed. The full documentation for your Zabbix Agent can be found here: +# https://www.zabbix.com/documentation/{{ zabbix_agent_version }}/en/manual/appendix/config/zabbix_agentd{{ "_win" if ansible_os_family == "Windows" else "" }} + +{% if ansible_os_family != "Windows" %} +PidFile={{ zabbix_agent_pidfile }} +{% endif %} +{% if zabbix_agent_version is version('3.0', '>=') %} +LogType={{ zabbix_agent_logtype }} +{% endif %} +{% if ansible_os_family == "Windows" %} +LogFile={{ zabbix_agent_win_logfile }} +{% else %} +LogFile={{ zabbix_agent_logfile }} +{% endif %} +LogFileSize={{ zabbix_agent_logfilesize }} +DebugLevel={{ zabbix_agent_debuglevel }} +{% if zabbix_agent_sourceip is defined and zabbix_agent_sourceip %} +SourceIP={{ zabbix_agent_sourceip }} +{% endif %} +{% if zabbix_agent_version is version('6.0', '<=') %} +EnableRemoteCommands={{ zabbix_agent_enableremotecommands }} +{% else %} +{% if zabbix_agent_allowkeys is defined and zabbix_agent_allowkeys %} +AllowKey={{ zabbix_agent_allowkeys }} +{% endif %} +{% if zabbix_agent_denykeys is defined and zabbix_agent_denykeys %} +DenyKey={{ zabbix_agent_denykeys }} +{% endif %} +{% endif %} +LogRemoteCommands={{ 
zabbix_agent_logremotecommands }} +Server={{ zabbix_agent_server }} +ListenPort={{ zabbix_agent_listenport }} +{% if zabbix_agent_listenip is defined and zabbix_agent_listenip !='0.0.0.0' and zabbix_agent_listenip %} +ListenIP={{ zabbix_agent_listenip }} +{% endif %} +StartAgents={{ zabbix_agent_startagents }} +ServerActive={{ zabbix_agent_serveractive }} +{% if zabbix_agent_hostname is defined and zabbix_agent_hostname %} +Hostname={{ zabbix_agent_hostname }} +{% endif %} +{% if zabbix_agent_hostnameitem is defined and zabbix_agent_hostnameitem %} +HostnameItem={{ zabbix_agent_hostnameitem }} +{% endif %} +{% if zabbix_agent_hostmetadata is defined and zabbix_agent_hostmetadata %} +HostMetadata={{ zabbix_agent_hostmetadata }} +{% endif %} +{% if zabbix_agent_hostmetadataitem is defined and zabbix_agent_hostmetadataitem %} +HostMetadataItem={{ zabbix_agent_hostmetadataitem }} +{% endif %} +{% if zabbix_agent_allow_key is defined and zabbix_agent_allow_key %} +{% for item in zabbix_agent_allow_key %} +AllowKey={{ item }} +{% endfor %} +{% endif %} +{% if zabbix_agent_deny_key is defined and zabbix_agent_deny_key %} +{% for item in zabbix_agent_deny_key %} +DenyKey={{ item }} +{% endfor %} +{% endif %} +RefreshActiveChecks={{ zabbix_agent_refreshactivechecks }} +BufferSend={{ zabbix_agent_buffersend }} +BufferSize={{ zabbix_agent_buffersize }} +MaxLinesPerSecond={{ zabbix_agent_maxlinespersecond }} +{% if zabbix_agent_version is version_compare('6.2', '>=') %} +HeartbeatFrequency={{ zabbix_agent_heartbeatfrequency }} +{% endif %} +{% if zabbix_agent_zabbix_alias is defined and zabbix_agent_zabbix_alias %} +{% if zabbix_agent_zabbix_alias is string %} +Alias={{ zabbix_agent_zabbix_alias }} +{% else %} +{% for item in zabbix_agent_zabbix_alias %} +Alias={{ item }} +{% endfor %} +{% endif %} +{% endif %} +Timeout={{ zabbix_agent_timeout }} +{% if ansible_os_family != "Windows" %} +AllowRoot={{ zabbix_agent_allowroot }} +{% endif %} +{% if zabbix_agent_runas_user is 
defined and zabbix_agent_runas_user %} +User={{ zabbix_agent_runas_user }} +{% endif %} +{% if ansible_os_family == "Windows" %} +Include={{ zabbix_agent_win_include }} +{% else %} +Include={{ zabbix_agent_include }}/{{ zabbix_agent_include_pattern }} +{% endif %} +{% if zabbix_agent_additional_include is defined and zabbix_agent_additional_include is iterable and zabbix_agent_additional_include is not string %} +{% for include in zabbix_agent_additional_include %} +Include={{ include }} +{% endfor %} +{% endif %} +UnsafeUserParameters={{ zabbix_agent_unsafeuserparameters }} +{% if zabbix_agent_version is version_compare('2.2', '>=') %} +{% if ansible_os_family != "Windows" %} +LoadModulePath={{ zabbix_agent_loadmodulepath }} +{% endif %} +{% endif %} +{% if zabbix_agent_loadmodule is defined and zabbix_agent_loadmodule %} +{% if zabbix_agent_loadmodule is string %} +LoadModule={{ zabbix_agent_loadmodule }} +{% else %} +{% for module in zabbix_agent_loadmodule %} +LoadModule={{ module }} +{% endfor %} +{% endif %} +{% endif %} +{% if zabbix_agent_version is version_compare('3.0', '>=') %} +{% if zabbix_agent_tlsconnect is defined and zabbix_agent_tlsconnect %} +TLSConnect={{ zabbix_agent_tlsconnect }} +{% endif %} +{% if zabbix_agent_tlsaccept is defined and zabbix_agent_tlsaccept %} +TLSAccept={{ zabbix_agent_tlsaccept }} +{% endif %} +{% if zabbix_agent_tlscafile is defined and zabbix_agent_tlscafile %} +TLSCAFile={{ zabbix_agent_tlscafile }} +{% endif %} +{% if zabbix_agent_tlscrlfile is defined and zabbix_agent_tlscrlfile %} +TLSCRLFile={{ zabbix_agent_tlscrlfile }} +{% endif %} +{% if zabbix_agent_tlsservercertissuer is defined and zabbix_agent_tlsservercertissuer %} +TLSServerCertIssuer={{ zabbix_agent_tlsservercertissuer }} +{% endif %} +{% if zabbix_agent_tlsservercertsubject is defined and zabbix_agent_tlsservercertsubject %} +TLSServerCertSubject={{ zabbix_agent_tlsservercertsubject }} +{% endif %} +{% if zabbix_agent_tlscertfile is defined and 
zabbix_agent_tlscertfile %} +TLSCertFile={{ zabbix_agent_tlscertfile }} +{% endif %} +{% if zabbix_agent_tlskeyfile is defined and zabbix_agent_tlskeyfile %} +TLSKeyFile={{ zabbix_agent_tlskeyfile }} +{% endif %} +{% if zabbix_agent_tlspskidentity is defined and zabbix_agent_tlspskidentity %} +TLSPSKIdentity={{ zabbix_agent_tlspskidentity }} +{% endif %} +{% if zabbix_agent_tlspskfile is defined and zabbix_agent_tlspskfile %} +TLSPSKFile={{ zabbix_agent_tlspskfile }} +{% endif %} +{% endif %} +{% if zabbix_agent_version is version('6.0', '>=') %} +{% if zabbix_agent_listenbacklog is defined and zabbix_agent_listenbacklog %} +ListenBacklog={{ zabbix_agent_listenbacklog }} +{% endif %} +{% endif %} diff --git a/ansible/01_old/roles/zabbix-agent/vars/Debian.yml b/ansible/01_old/roles/zabbix-agent/vars/Debian.yml new file mode 100644 index 0000000..7c46c31 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/vars/Debian.yml @@ -0,0 +1,48 @@ +--- +# vars file for zabbix_agent (Debian) + +zabbix_agent: zabbix-agent +zabbix_agent_service: zabbix-agent +zabbix_agent_conf: zabbix_agentd.conf +zabbix_agent2_conf: zabbix_agent2.conf + +zabbix_valid_agent_versions: + # Debian + "12": + - 6.4 + - 6.2 + - 6.0 + + "11": + - 6.4 + - 6.2 + - 6.0 + + "10": + - 6.4 + - 6.2 + - 6.0 + + "9": + - 6.4 + - 6.2 + - 6.0 + # Ubuntu + "22": + - 6.4 + - 6.2 + - 6.0 + + "20": + - 6.4 + - 6.2 + - 6.0 + + "18": + - 6.4 + - 6.2 + - 6.0 + +debian_keyring_path: /etc/apt/keyrings/ +zabbix_gpg_key: "{{ debian_keyring_path }}/zabbix-official-repo.asc" +_zabbix_repo_deb_url: "http://repo.zabbix.com/zabbix/{{ zabbix_agent_version }}/{{ ansible_distribution.lower() }}" diff --git a/ansible/01_old/roles/zabbix-agent/vars/RedHat.yml b/ansible/01_old/roles/zabbix-agent/vars/RedHat.yml new file mode 100644 index 0000000..2302e0f --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/vars/RedHat.yml @@ -0,0 +1,21 @@ +--- +# vars file for zabbix_agent (RedHat) + +zabbix_agent: zabbix-agent 
+zabbix_agent_service: zabbix-agent +zabbix_agent_conf: zabbix_agentd.conf +zabbix_agent2_conf: zabbix_agent2.conf + +zabbix_valid_agent_versions: + "9": + - 6.4 + - 6.2 + - 6.0 + "8": + - 6.4 + - 6.2 + - 6.0 + "7": + - 6.4 + - 6.2 + - 6.0 diff --git a/ansible/01_old/roles/zabbix-agent/vars/Windows.yml b/ansible/01_old/roles/zabbix-agent/vars/Windows.yml new file mode 100644 index 0000000..e4a7216 --- /dev/null +++ b/ansible/01_old/roles/zabbix-agent/vars/Windows.yml @@ -0,0 +1,7 @@ +--- +# vars file for zabbix_agent (Windows) +zabbix_valid_agent_versions: + "10": + - 6.4 + - 6.2 + - 6.0 diff --git a/ansible/01_old/roles/zabbix-agent/vars/main.yml b/ansible/01_old/roles/zabbix-agent/vars/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/01_old/ssh_key/README.md b/ansible/01_old/ssh_key/README.md new file mode 100644 index 0000000..f6eec9a --- /dev/null +++ b/ansible/01_old/ssh_key/README.md @@ -0,0 +1,9 @@ +# ssh key 배포 installer + +# authorized_keys.yaml은 client에 ssh-pass가 설치되어 있어야 해서 일단 미사용 + +1. 사전 작업 + - ssh-keygen + +2. 
명령어 + - ./key.sh diff --git a/ansible/01_old/ssh_key/authorized_keys.yml b/ansible/01_old/ssh_key/authorized_keys.yml new file mode 100644 index 0000000..d01e291 --- /dev/null +++ b/ansible/01_old/ssh_key/authorized_keys.yml @@ -0,0 +1,11 @@ +--- +- hosts: cluster + remote_user: root + tasks: + - name: key add + authorized_key: + user: root + state: present + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" + manage_dir: False + diff --git a/ansible/01_old/ssh_key/ip_list b/ansible/01_old/ssh_key/ip_list new file mode 100644 index 0000000..5ee481c --- /dev/null +++ b/ansible/01_old/ssh_key/ip_list @@ -0,0 +1,32 @@ +10.10.43.111 +10.10.43.112 +10.10.43.113 +10.10.43.114 +10.10.43.115 +10.10.43.116 +10.10.43.117 +10.10.43.118 +10.10.43.119 +10.10.43.120 +10.10.43.121 +10.10.43.122 +10.10.43.123 +10.10.43.124 +10.10.43.125 +10.10.43.126 +10.10.43.127 +10.10.43.128 +10.10.43.129 +10.10.43.130 +10.10.43.131 +10.10.43.132 +10.10.43.133 +10.10.43.134 +10.10.43.135 +10.10.43.136 +10.10.43.137 +10.10.43.138 +10.10.43.139 +10.10.43.140 +10.10.43.141 +10.10.43.142 diff --git a/ansible/01_old/ssh_key/key.sh b/ansible/01_old/ssh_key/key.sh new file mode 100755 index 0000000..d7e6884 --- /dev/null +++ b/ansible/01_old/ssh_key/key.sh @@ -0,0 +1,8 @@ +#!/usr/bin/expect -f +set password [lindex $argv 0] +set host [lindex $argv 1] + +spawn ssh-copy-id -o StrictHostKeyChecking=no root@$host +expect "password:" +send "$password\n" +expect eof diff --git a/ansible/01_old/ssh_key/test.sh b/ansible/01_old/ssh_key/test.sh new file mode 100755 index 0000000..958dbf8 --- /dev/null +++ b/ansible/01_old/ssh_key/test.sh @@ -0,0 +1,8 @@ +#!/bin/bash + + +while read ip +do + ./key.sh saasadmin1234 ${ip} +done < ip_list + diff --git a/ansible/01_old/std_inven b/ansible/01_old/std_inven new file mode 100644 index 0000000..c34a4a0 --- /dev/null +++ b/ansible/01_old/std_inven @@ -0,0 +1,4 @@ +[agent] +10.10.43.177 ansible_user=redhat +10.10.43.178 ansible_user=redhat 
+10.10.43.179 ansible_user=redhat diff --git a/ansible/01_old/teleport b/ansible/01_old/teleport new file mode 100755 index 0000000..2c70f12 --- /dev/null +++ b/ansible/01_old/teleport @@ -0,0 +1,26 @@ +[manager] +10.10.43.98 ansible_user=root +10.10.43.[105:106] ansible_user=ubuntu +10.10.43.[100:101] ansible_user=ubuntu + +[saas_mgmt_master] +10.10.43.240 ansible_user=ubuntu + +[saas_mgmt_node] +10.10.43.[241:243] ansible_user=ubuntu + +[dsk_dev_master] +10.10.43.[111:113] ansible_user=ubuntu + +[dsk_dev_node] +10.10.43.[114:153] ansible_user=ubuntu + +[bastion] +10.10.43.43 ansible_port=2222 ansible_user=ubuntu + +[all:children] +saas_mgmt_master +saas_mgmt_node +dsk_dev_master +dsk_dev_node +bastion diff --git a/ansible/01_old/teleport.yml b/ansible/01_old/teleport.yml new file mode 100644 index 0000000..f35bffa --- /dev/null +++ b/ansible/01_old/teleport.yml @@ -0,0 +1,11 @@ +--- +- hosts: all + become: true + roles: + - teleport + vars: + teleport_uri: teleport.kr.datasaker.io + # remove: True + # custom_labels: 'user=havelight,company=exem' + # update: True + install: True diff --git a/ansible/01_old/teleport_aws.yml b/ansible/01_old/teleport_aws.yml new file mode 100644 index 0000000..df225de --- /dev/null +++ b/ansible/01_old/teleport_aws.yml @@ -0,0 +1,12 @@ +--- +#- hosts: prod-demo-master,prod-demo-worker,dev-demo-master,dev-demo-worker +- hosts: all + become: true + roles: + - teleport + vars: + teleport_uri: teleport.kr.datasaker.io + # remove: True + # custom_labels: 'user=havelight,company=exem' + # update: True + install: True diff --git a/ansible/01_old/zabbix-agent.yaml b/ansible/01_old/zabbix-agent.yaml new file mode 100644 index 0000000..9ce7065 --- /dev/null +++ b/ansible/01_old/zabbix-agent.yaml @@ -0,0 +1,28 @@ +--- +- hosts: all + roles: + - role: zabbix-agent + zabbix_api_server_host: 10.10.43.252 + zabbix_api_server_port: 80 + ansible_zabbix_url_path: "/" + zabbix_api_login_user: sa8001 + zabbix_api_login_pass: ios2011a + 
zabbix_api_create_hostgroup: false + zabbix_api_create_hosts: true + zabbix_agent_host_state: present + zabbix_host_groups: + - Linux servers + - Virtual machines + zabbix_agent_visible_hostname: "{{ ansible_fqdn }}" + zabbix_agent_server: 10.10.43.252 + zabbix_agent_serveractive: 10.10.43.252 + zabbix_agent_link_templates: + - Linux by Zabbix agent + zabbix_agent_version: 6.4 + zabbix_agent_unsafeuserparameters: 1 + zabbix_agent_userparameters: + - name: zombie + - name: zombielist + - name: dev2_iac_pass_failed + - name: dev2_pass_failed + - name: root_pass_failed diff --git a/ansible/README.md b/ansible/README.md new file mode 100644 index 0000000..7a78ecb --- /dev/null +++ b/ansible/README.md @@ -0,0 +1,17 @@ +# Ansible Script +ansible script 구조 +``` +. +├── 00_old +├── 01_old +├── infra_setting +├── teleport_setting +└── zabbix_agent +``` +|디렉토리|설명| +|---|---| +|00_old|과거 스크립트 백업| +|01_old|기존 Ansible Script 백업| +|infra_setting|Drop IP 설정, dev2 그룹 생성, dev2-iac 및 dev2 유저 생성, ssh key 등록
sudo 설정 추가, selinux 종료, 방화벽 종료, ssh port 변경, ssh root 로그인 비활성
ssh 접속 ip 설정, 패스워드 변경, vault 등록, Excel 저장| +|teleport_setting|Teleport Agent 배포, Teleport 등록| +|zabbix_agent|Zabbix Agent 배포, Zabbix 등록| \ No newline at end of file diff --git a/ansible/infra_setting/ansible.cfg b/ansible/infra_setting/ansible.cfg new file mode 100755 index 0000000..0ebf722 --- /dev/null +++ b/ansible/infra_setting/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +inventory = inventory +roles_path = roles +deprecation_warnings = False +display_skipped_hosts = no +ansible_home = . +stdout_callback = debug +host_key_checking=False +#private_key_file=/root/.ssh/dev2-iac +#remote_tmp = /tmp/.ansible/tmp diff --git a/ansible/infra_setting/infra-settings.yml b/ansible/infra_setting/infra-settings.yml new file mode 100644 index 0000000..4902c49 --- /dev/null +++ b/ansible/infra_setting/infra-settings.yml @@ -0,0 +1,19 @@ +--- +- hosts: all + become: true + roles: + - connect-settings + # - teleport + vars: + username: dev2 + adminuser: root + manual_password: saasadmin1234 + sshmainport: 2222 + #encrypt: 1 + #debug_mode: True + teleport_uri: teleport.kr.datasaker.io + # remove: True + # custom_labels: 'user=havelight,company=exem' + #update: True + # install: True + diff --git a/ansible/infra_setting/inventory b/ansible/infra_setting/inventory new file mode 100644 index 0000000..67052d2 --- /dev/null +++ b/ansible/infra_setting/inventory @@ -0,0 +1,31 @@ +[all] +10.10.43.195 +10.10.43.196 +10.10.43.197 +10.10.43.200 +10.10.43.201 +10.10.43.202 +10.10.43.203 +10.10.43.204 +10.10.43.205 +10.10.43.206 +10.10.43.207 +10.10.43.208 +10.10.43.210 +10.10.43.211 +10.10.43.212 +10.10.43.213 +10.10.43.214 +10.10.43.215 +10.10.43.216 +10.10.43.217 +10.10.43.218 +10.10.43.224 +10.10.43.225 +10.10.43.226 +10.10.43.227 +10.10.43.228 +10.10.43.230 +10.10.43.235 +10.10.43.236 +10.10.43.252 diff --git a/ansible/infra_setting/passwd_inventory b/ansible/infra_setting/passwd_inventory new file mode 100644 index 0000000..33a2461 --- /dev/null +++ 
b/ansible/infra_setting/passwd_inventory @@ -0,0 +1,76 @@ +[all] +10.10.43.100 ansible_port=2222 ansible_user=dev2 +10.10.43.101 ansible_port=2222 ansible_user=dev2 +10.10.43.105 ansible_port=2222 ansible_user=dev2 +10.10.43.106 ansible_port=2222 ansible_user=dev2 +10.10.43.111 ansible_port=2222 ansible_user=dev2 +10.10.43.112 ansible_port=2222 ansible_user=dev2 +10.10.43.113 ansible_port=2222 ansible_user=dev2 +10.10.43.114 ansible_port=2222 ansible_user=dev2 +10.10.43.115 ansible_port=2222 ansible_user=dev2 +10.10.43.116 ansible_port=2222 ansible_user=dev2 +10.10.43.117 ansible_port=2222 ansible_user=dev2 +10.10.43.118 ansible_port=2222 ansible_user=dev2 +10.10.43.119 ansible_port=2222 ansible_user=dev2 +10.10.43.120 ansible_port=2222 ansible_user=dev2 +10.10.43.121 ansible_port=2222 ansible_user=dev2 +10.10.43.122 ansible_port=2222 ansible_user=dev2 +10.10.43.123 ansible_port=2222 ansible_user=dev2 +10.10.43.124 ansible_port=2222 ansible_user=dev2 +10.10.43.125 ansible_port=2222 ansible_user=dev2 +10.10.43.126 ansible_port=2222 ansible_user=dev2 +10.10.43.127 ansible_port=2222 ansible_user=dev2 +10.10.43.128 ansible_port=2222 ansible_user=dev2 +10.10.43.129 ansible_port=2222 ansible_user=dev2 +10.10.43.130 ansible_port=2222 ansible_user=dev2 +10.10.43.131 ansible_port=2222 ansible_user=dev2 +10.10.43.132 ansible_port=2222 ansible_user=dev2 +10.10.43.133 ansible_port=2222 ansible_user=dev2 +10.10.43.134 ansible_port=2222 ansible_user=dev2 +10.10.43.135 ansible_port=2222 ansible_user=dev2 +10.10.43.136 ansible_port=2222 ansible_user=dev2 +10.10.43.137 ansible_port=2222 ansible_user=dev2 +10.10.43.138 ansible_port=2222 ansible_user=dev2 +10.10.43.139 ansible_port=2222 ansible_user=dev2 +10.10.43.140 ansible_port=2222 ansible_user=dev2 +10.10.43.141 ansible_port=2222 ansible_user=dev2 +10.10.43.142 ansible_port=2222 ansible_user=dev2 +10.10.43.143 ansible_port=2222 ansible_user=dev2 +10.10.43.144 ansible_port=2222 ansible_user=dev2 +10.10.43.145 ansible_port=2222 
ansible_user=dev2 +10.10.43.146 ansible_port=2222 ansible_user=dev2 +10.10.43.147 ansible_port=2222 ansible_user=dev2 +10.10.43.148 ansible_port=2222 ansible_user=dev2 +10.10.43.151 ansible_port=2222 ansible_user=dev2 +10.10.43.152 ansible_port=2222 ansible_user=dev2 +10.10.43.153 ansible_port=2222 ansible_user=dev2 +10.10.43.164 ansible_port=2222 ansible_user=dev2 +10.10.43.165 ansible_port=2222 ansible_user=dev2 +10.10.43.166 ansible_port=2222 ansible_user=dev2 +10.10.43.167 ansible_port=2222 ansible_user=dev2 +10.10.43.168 ansible_port=2222 ansible_user=dev2 +10.10.43.169 ansible_port=2222 ansible_user=dev2 +10.10.43.171 ansible_port=2222 ansible_user=dev2 +10.10.43.172 ansible_port=2222 ansible_user=dev2 +10.10.43.173 ansible_port=2222 ansible_user=dev2 +10.10.43.174 ansible_port=2222 ansible_user=dev2 +10.10.43.175 ansible_port=2222 ansible_user=dev2 +10.10.43.176 ansible_port=2222 ansible_user=dev2 +10.10.43.177 ansible_port=2222 ansible_user=dev2 +10.10.43.178 ansible_port=2222 ansible_user=dev2 +10.10.43.179 ansible_port=2222 ansible_user=dev2 +10.10.43.180 ansible_port=2222 ansible_user=dev2 +10.10.43.181 ansible_port=2222 ansible_user=dev2 +10.10.43.182 ansible_port=2222 ansible_user=dev2 +10.10.43.185 ansible_port=2222 ansible_user=dev2 +10.10.43.186 ansible_port=2222 ansible_user=dev2 +10.10.43.187 ansible_port=2222 ansible_user=dev2 +10.10.43.188 ansible_port=2222 ansible_user=dev2 +10.10.43.189 ansible_port=2222 ansible_user=dev2 +10.10.43.190 ansible_port=2222 ansible_user=dev2 +10.10.43.191 ansible_port=2222 ansible_user=dev2 +10.10.43.192 ansible_port=2222 ansible_user=dev2 +10.10.43.193 ansible_port=2222 ansible_user=dev2 +10.10.43.194 ansible_port=2222 ansible_user=dev2 +10.10.43.199 ansible_port=2222 ansible_user=dev2 + diff --git a/ansible/infra_setting/roles/.DS_Store b/ansible/infra_setting/roles/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..17f2c108b61b495005ce582996417109b95b1826 GIT binary patch literal 
6148 zcmeHKQA@)x5WdW*9U}BWLEi$tPTZ6Y#g|g`4_MI$mD#$Y#oEZ)xrZ_6yZ#~nh`-0X zBo$?fFCsGT#^t+Q?h^84Ia_!{5jAEAS)}^xZ%njq-;mRT0L%L=S0_7p>N-sLj>q7Z#mGXUVw? zZq+C#!eW^F;ozKlCrYJJWrxve5|4WB%7My?Fv;S9PDuJOhFqK_SznEOHO%_C&JE0f z({LI+cYQqWw7X5Y)9Or`a@=lr_nLBddv7vnIIA05`^UYz^gdHhhFyVwL@mn}D|klZ z$BjM)gDh3q4aQdURQZg|05iZ0%n$?iigT7`XivNqW`G&^9R}!rkf?;7#nPa@ILOxLCQfAszP|8o-em;q*BrWg?Q!{E@zE!o;SwK=M_67>p|gyKqr l?S5Z>*NO({YS3VK`cTCi1GEM7vbFJMFuDm5WRLu0lysX3HF?)pN$h|lB9 z?gm;69!2a7?0&QJvzz%K`@3s4^wN~G0SPiRXJ-E*@b5k!}Btvh0hofsLBfro+ z|0bMdV|(vH#;F&^letO=g9(J(--dA@v!PtXL85Y9?Xa3wb8H_jm%VPkElxYVRa-2( z-Tt60&Q1oaRnyu(IzGP|PotMuzG)^oP_AUxU;*!-td;cY&f`eNPhc%Fibz6YfEXYK zh=HwSz#IowYb&d#qKN@w;3o!fe-O|R9fP?>wRJ#;*Jt$C5K%zKw*;av=orj3f(L}_ zR6w1|%@c#`bg&B(=NQa2>U74{$}o>vxp=&AwK~{^3TNEWNIfw?3~Vw`)k7Q4|4aB~ zN+0=~DKsJmh=G5`0I!YQ(GZFF$eggRp27Z~^g7%xjh literal 0 HcmV?d00001 diff --git a/ansible/infra_setting/roles/connect-settings/README.md b/ansible/infra_setting/roles/connect-settings/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
+ +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ansible/infra_setting/roles/connect-settings/defaults/main.yml b/ansible/infra_setting/roles/connect-settings/defaults/main.yml new file mode 100644 index 0000000..5415520 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/defaults/main.yml @@ -0,0 +1,15 @@ +--- +# defaults file for password + +encrypt: 0 # strings 0 , encrypted 1 +debug_mode: False +sshrootlogin: forced-commands-only +sshmainport: 2222 +iptables_rules: + - { source: "10.10.45.0/24", target: "DROP" } + - { source: "10.10.47.0/24", target: "DROP" } + - { source: "10.10.48.0/24", target: "DROP" } + - { source: "10.10.50.0/24", target: "DROP" } + - { source: "10.10.37.0/24", target: "DROP" } +delete_rule: False +add_rule: True \ No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/files/00_old/gen_password.py b/ansible/infra_setting/roles/connect-settings/files/00_old/gen_password.py new file mode 100644 index 0000000..b1b4e13 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/files/00_old/gen_password.py @@ -0,0 +1,44 @@ +#!/usr/bin/python3 + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypt_flag=True if os.sys.argv[1].lower()=='1' else False +except Exception as err: + 
encrypt_flag=False + +def generate_password(length=8, num_uppercase=1, num_lowercase=1, num_digits=1, num_sp_char=1): + sp_char = '!@#$' + all_chars = string.ascii_letters + string.digits + sp_char + + password = [ + *random.choices(string.ascii_uppercase, k=num_uppercase), + *random.choices(string.ascii_lowercase, k=num_lowercase), + *random.choices(string.digits, k=num_digits), + *random.choices(sp_char, k=num_sp_char) + ] + + remaining_length = length - (num_uppercase + num_lowercase + num_digits + num_sp_char) + password += random.choices(all_chars, k=remaining_length) + + random.shuffle(password) + return ''.join(password) + +def encrypt(plain_text, key): + manual_iv = b'PhilinnovatorDEV' + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + ct_bytes = cipher.encrypt(pad(plain_text.encode(), 16)) + ct = base64.b64encode(ct_bytes).decode('utf-8') + return ct + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +plain_text = generate_password() + +if encrypt_flag: + encrypted_text = encrypt(plain_text, key) + print(encrypted_text) +else: + print(plain_text) diff --git a/ansible/infra_setting/roles/connect-settings/files/00_old/vault_test.py b/ansible/infra_setting/roles/connect-settings/files/00_old/vault_test.py new file mode 100644 index 0000000..18f6988 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/files/00_old/vault_test.py @@ -0,0 +1,11 @@ +import hvac + +str_url = "http://10.10.43.98:31080" +str_token = "hvs.CAESIMV6zCg-GpUP4pQgVA5f1ZXkgyJZrqOC6QDCegrpiAX9Gh4KHGh2cy5ORkpkc2ZyVUxYd09qUVFtQldRNDBjS3I" +client = hvac.Client(url=str_url, token=str_token) + +str_mount_point = 'kv' +str_secret_path = 'host1' +read_secret_result = client.secrets.kv.v1.read_secret(mount_point=str_mount_point, path=str_secret_path) +print(read_secret_result) + diff --git a/ansible/infra_setting/roles/connect-settings/files/custom_excel b/ansible/infra_setting/roles/connect-settings/files/custom_excel new file mode 100755 index 0000000..562b89c --- /dev/null +++ 
b/ansible/infra_setting/roles/connect-settings/files/custom_excel @@ -0,0 +1,108 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import os, sys, time, errno, socket, signal, psutil, random, logging.handlers, subprocess, paramiko, hvac +from xlwt import Workbook, XFStyle, Borders, Font, Pattern +from socket import error as SocketError + +process_time = time.strftime("%Y%m%d_%H%M", time.localtime()) +excel_file_name = '/mnt/e/excel/{}.xls'.format(process_time) + +def process_close(flag=True, result=''): + if flag: + print("[Success]") + else: + print("[Fail]:{}".format(result)) + + sys.exit(0) + +def set_header(sheet, header_list): + # 폰트 설정 + font = Font() + font.bold = True + + # 테두리 설정 + borders = Borders() + borders.left = Borders.THIN + borders.right = Borders.THIN + borders.top = Borders.THIN + borders.bottom = Borders.THIN + + # 배경색 설정 + pattern = Pattern() + pattern.pattern = Pattern.SOLID_PATTERN + pattern.pattern_fore_colour = 22 # #E2EFDA는 xlwt에서 인덱스 22에 해당하는 색입니다. + + hdrstyle = XFStyle() + hdrstyle.font = font + hdrstyle.borders = borders + hdrstyle.pattern = pattern + + for idx, header in enumerate(header_list): + sheet.write(0, idx, header, hdrstyle) + sheet.col(idx).width = len(header) * 800 + +def write_data(sheet, data_list): + datestyle = XFStyle() + datestyle.num_format_str = 'YYYY-MM-DD' + + for row_num, data in enumerate(data_list, start=1): + for col_num, cell_data in enumerate(data): + if col_num == 7: + sheet.write(row_num, col_num, cell_data, datestyle) + elif col_num in [1, 4, 5]: + formatted_data = u'{}'.format(cell_data) if cell_data else '' + sheet.write(row_num, col_num, formatted_data) + else: + sheet.write(row_num, col_num, cell_data) + +def excel_write(header_list=[], data_list=[], filename='', sheetTitle=''): + workbook = Workbook(style_compression=2, encoding='utf-8') + sheet = workbook.add_sheet(sheetTitle) + + set_header(sheet, header_list) + write_data(sheet, data_list) + + sheet.panes_frozen = True + sheet.vert_split_pos = 0 
+ sheet.horz_split_pos = 1 + workbook.save(filename) + +def main(): + header_list=['번호','호스트 유형','호스트명','호스트 IP','포트번호','프로토콜','인증방법','1차 로그인 계정명','1차 로그인 비밀번호','1차 로그인 계정명','2차 로그인 비밀번호','용도','비고'] + data_list=[] + + openfile=open('/tmp/host_list','r') + readfile=openfile.readlines() + openfile.close() + for idx, host_data in enumerate(readfile): + try: + if idx==0: continue + host_num=idx + hosttype=host_data.strip().split(' ')[0] + print(hosttype) + hostname=host_data.strip().split(' ')[1] + host_ips=host_data.strip().split(' ')[2] + port_num=int(host_data.strip().split(' ')[3]) + protocol='SSH' + auth_con='Password' + username=host_data.strip().split(' ')[4] + first_pw=host_data.strip().split(' ')[5] + rootuser=host_data.strip().split(' ')[6] + secon_pw=host_data.strip().split(' ')[7] + descript='-' + remarks_='-' + data_list.append([host_num,hosttype,hostname,host_ips,port_num,protocol,auth_con,username,first_pw,rootuser,secon_pw,descript,remarks_,]) + except: + continue + + excel_write(header_list, data_list, excel_file_name, 'TEST') + +DEBUG=False +try: + if os.sys.argv[1]: DEBUG=True +except: + pass +main() +process_close() + diff --git a/ansible/infra_setting/roles/connect-settings/files/decrypt_password b/ansible/infra_setting/roles/connect-settings/files/decrypt_password new file mode 100755 index 0000000..5e31c71 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/files/decrypt_password @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypted_text=os.sys.argv[1] +except: + encrypted_text="q6i1/JxyNe1OUrO0JKu+Z4WQTyQZam2yIJTp43dl1pI=" + +def decrypt(ct, key): + manual_iv = b'PhilinnovatorDEV' + ct_bytes = base64.b64decode(ct) + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + return unpad(cipher.decrypt(ct_bytes), 16).decode('utf-8') + +key = 
b'PhilinnovatorDEVPhilinnovatorDEV' +print(decrypt(encrypted_text, key)) \ No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/files/gen_password b/ansible/infra_setting/roles/connect-settings/files/gen_password new file mode 100755 index 0000000..febe48a --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/files/gen_password @@ -0,0 +1,45 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import base64, random, string, os +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad, unpad + +try: + encrypt_flag=True if os.sys.argv[1].lower()=='1' else False +except Exception as err: + encrypt_flag=False + +def generate_password(length=12, num_uppercase=3, num_lowercase=4, num_digits=3, num_sp_char=2): + sp_char = '!@#$' + all_chars = string.ascii_letters + string.digits + sp_char + + password = [ + *random.choices(string.ascii_uppercase, k=num_uppercase), + *random.choices(string.ascii_lowercase, k=num_lowercase), + *random.choices(string.digits, k=num_digits), + *random.choices(sp_char, k=num_sp_char) + ] + + remaining_length = length - (num_uppercase + num_lowercase + num_digits + num_sp_char) + password += random.choices(all_chars, k=remaining_length) + + random.shuffle(password) + return ''.join(password) + +def encrypt(plain_text, key): + manual_iv = b'PhilinnovatorDEV' + cipher = AES.new(key, AES.MODE_CBC, iv=manual_iv) + ct_bytes = cipher.encrypt(pad(plain_text.encode(), 16)) + ct = base64.b64encode(ct_bytes).decode('utf-8') + return ct + +key = b'PhilinnovatorDEVPhilinnovatorDEV' +plain_text = generate_password() + +if encrypt_flag: + encrypted_text = encrypt(plain_text, key) + print(encrypted_text) +else: + print(plain_text) diff --git a/ansible/infra_setting/roles/connect-settings/files/vault_get b/ansible/infra_setting/roles/connect-settings/files/vault_get new file mode 100755 index 0000000..d0fabdb --- /dev/null +++ 
b/ansible/infra_setting/roles/connect-settings/files/vault_get @@ -0,0 +1,17 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import hvac +import os + +hostname=os.sys.argv[1] + +str_url = "http://10.10.43.240:30803" +client = hvac.Client(url=str_url) +client.auth.approle.login(role_id="e96c5fd8-abde-084a-fde7-7450a9348a70", secret_id="5371706b-414a-11d3-f3fd-6cf98871aad1") + +try: + data = client.secrets.kv.v2.read_secret_version(mount_point='host', path=hostname, raise_on_deleted_version=True)['data']['data'] + print(data) +except Exception as err: + print(err) diff --git a/ansible/infra_setting/roles/connect-settings/files/vault_put b/ansible/infra_setting/roles/connect-settings/files/vault_put new file mode 100755 index 0000000..aeae507 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/files/vault_put @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +#-*- coding: utf-8 -*- + +import hvac +import os + +hostname=os.sys.argv[1] +accountid=os.sys.argv[2] +password=os.sys.argv[3] +adminuser=os.sys.argv[4] +adminpass=os.sys.argv[5] + +str_url = "http://10.10.43.240:30803" +client = hvac.Client(url=str_url) +client.auth.approle.login(role_id="e96c5fd8-abde-084a-fde7-7450a9348a70", secret_id="5371706b-414a-11d3-f3fd-6cf98871aad1") + +client.secrets.kv.v2.create_or_update_secret( + mount_point='host', + path=hostname, + secret=dict(accountid=f'{accountid}',password=f'{password}',adminuser=f'{adminuser}',adminpass=f'{adminpass}') +) diff --git a/ansible/infra_setting/roles/connect-settings/handlers/main.yml b/ansible/infra_setting/roles/connect-settings/handlers/main.yml new file mode 100644 index 0000000..b44722c --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/handlers/main.yml @@ -0,0 +1,16 @@ +--- +- name: Reload systemd configuration + ansible.builtin.systemd: + daemon_reload: True + +- name: Restart teleport service + ansible.builtin.systemd: + name: teleport + enabled: true + state: restarted + +- name: restart sshd + service: + name: sshd + 
state: restarted + enabled: true \ No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/meta/main.yml b/ansible/infra_setting/roles/connect-settings/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
diff --git a/ansible/infra_setting/roles/connect-settings/tasks/00_host_setting.yml b/ansible/infra_setting/roles/connect-settings/tasks/00_host_setting.yml new file mode 100644 index 0000000..0e08eb8 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/00_host_setting.yml @@ -0,0 +1,162 @@ +--- +- name: "host setting" + hosts: all + become: yes + vars: + iptables_rules: + - { source: "10.10.45.0/24", target: "DROP" } + - { source: "10.10.47.0/24", target: "DROP" } + - { source: "10.10.48.0/24", target: "DROP" } + - { source: "10.10.50.0/24", target: "DROP" } + - { source: "10.10.37.0/24", target: "DROP" } + delete_rule: False + add_rule: True + +- name: "Create dev2 group" + ansible.builtin.group: + name: "dev2" + state: present + when: + - add_rule == True + +- name: Ensure user dev2-iac exists + user: + name: "{{ item }}" + create_home: yes + home: "/home/{{ item }}" + group: dev2 + shell: /bin/bash + with_items: + - dev2-iac + - dev2 + when: + - add_rule == True + +- name: "Ensure .ssh directory exists for dev2-iac" + file: + path: /home/dev2-iac/.ssh + state: directory + owner: dev2-iac + group: dev2 + mode: '0700' + when: + - add_rule == True + +- name: "Add authorized key for dev2-iac" + authorized_key: + user: dev2-iac + key: "{{ item }}" + with_items: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRP/Kjn7UBudTO4ZLtWXRJNDcOPGbm+5jLKax+1tVgN2n0MCmwwrbFJQJvdaE/wp4+PnMtEyt+IqdwFdUDah8tu9CIYZ2Jk2T18oU7hYGvymh+QJmZgCNvYcmM9ATJbXpns7y8VLDVbkSq9EJIB+emLt1ZV/C8cyvhlmBUwGQA6c3zMgzWl9MT0HLa7H88cNVVknZPY0vGIw+H0Y2JtDr62xyVNT7w8B+jh7Yu6nCnQchwx3IRWGATuKfi2FB3rhkDqNvM1h00JJosu5ooBn3g5xll+w+sVKIQxEWShI9zatYP9/zrce+uVYeZLfz52X8giJ9dns66vqEKdJtdp4By5RPxRSsdQ2QGAQ0UuBHKgweU2EzivLynu49oiShAiJPxmru4TiGtchl52dvw/E9rjZiCKTq697azHHLbwTiOgbHpnu7GrxNRMdXCON70RYJpfERg/SGxxmUNF9OhYUeQJGNc8DcWnlBUrT/9Wi3Ryh1rKx2wtZt6eDkrehJ1lgU=" + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDmxGUDo5rdB/XA+cyH4a7Kn8zGWHqbL0AZDL55j5JLRLXC/z482Rp2cIx/FsQRtwEslEVXHHSowpJWHvQ4Z6NcInh0/0psJK2K8qnApLDHhPoiQzpGL+nG4JIho/10QPGpJ2aDcXdushvUME97j0A8hfaoR2xhBl2C9r865Vred0M971A5SRchwN/cmsTh2OMYGXKHD9RC6OFud2sQjyidkSTW58yBoN2B5CoAO4GMV09jX6Wp43jot19xJ5lX65NAHLsNIXMWiURmQDieIKqEiwWlPgwo7geErHlMOoNoypU9yTaN9NMYWZBG1xVL5skjmkdTEd+cnHBLAvhVtW1w5pOA7S8OUXkmiu0UITLYyWfzUx4uwzb7nGcb6aDboRVX6w8H4+GVgpYWJq+fh0ZZ9JbsdP6+PjRz1vgptM7K4Ji5ZRvqV5WMT0cvpySBaJakLSiPSa+dxGi6nfowXvUEAzMIVyaScNgCs1/NpdgN8dwffZlYB9WBUxY+5IjBQc8=" + when: + - add_rule == True + +- name: "Add authorized key for dev2" + authorized_key: + user: dev2 + key: "{{ item }}" + with_items: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRP/Kjn7UBudTO4ZLtWXRJNDcOPGbm+5jLKax+1tVgN2n0MCmwwrbFJQJvdaE/wp4+PnMtEyt+IqdwFdUDah8tu9CIYZ2Jk2T18oU7hYGvymh+QJmZgCNvYcmM9ATJbXpns7y8VLDVbkSq9EJIB+emLt1ZV/C8cyvhlmBUwGQA6c3zMgzWl9MT0HLa7H88cNVVknZPY0vGIw+H0Y2JtDr62xyVNT7w8B+jh7Yu6nCnQchwx3IRWGATuKfi2FB3rhkDqNvM1h00JJosu5ooBn3g5xll+w+sVKIQxEWShI9zatYP9/zrce+uVYeZLfz52X8giJ9dns66vqEKdJtdp4By5RPxRSsdQ2QGAQ0UuBHKgweU2EzivLynu49oiShAiJPxmru4TiGtchl52dvw/E9rjZiCKTq697azHHLbwTiOgbHpnu7GrxNRMdXCON70RYJpfERg/SGxxmUNF9OhYUeQJGNc8DcWnlBUrT/9Wi3Ryh1rKx2wtZt6eDkrehJ1lgU=" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDmxGUDo5rdB/XA+cyH4a7Kn8zGWHqbL0AZDL55j5JLRLXC/z482Rp2cIx/FsQRtwEslEVXHHSowpJWHvQ4Z6NcInh0/0psJK2K8qnApLDHhPoiQzpGL+nG4JIho/10QPGpJ2aDcXdushvUME97j0A8hfaoR2xhBl2C9r865Vred0M971A5SRchwN/cmsTh2OMYGXKHD9RC6OFud2sQjyidkSTW58yBoN2B5CoAO4GMV09jX6Wp43jot19xJ5lX65NAHLsNIXMWiURmQDieIKqEiwWlPgwo7geErHlMOoNoypU9yTaN9NMYWZBG1xVL5skjmkdTEd+cnHBLAvhVtW1w5pOA7S8OUXkmiu0UITLYyWfzUx4uwzb7nGcb6aDboRVX6w8H4+GVgpYWJq+fh0ZZ9JbsdP6+PjRz1vgptM7K4Ji5ZRvqV5WMT0cvpySBaJakLSiPSa+dxGi6nfowXvUEAzMIVyaScNgCs1/NpdgN8dwffZlYB9WBUxY+5IjBQc8=" + when: + - add_rule == True + +- name: "sudoers_users file" + file: + path: /etc/sudoers.d/sudoers_users + state: touch + when: + - add_rule == True + +- name: "Allow user to sudo" + lineinfile: + 
path: /etc/sudoers.d/sudoers_users + line: "{{ item }} ALL=(ALL) NOPASSWD:ALL" + state: present + with_items: + - dev2-iac + - dev2 + when: + - add_rule == True + +# - name: Check if rule exists +# command: iptables -D INPUT 7 +# loop: "{{ range(0, 9) }}" +# ignore_errors: yes +# when: +# - delete_rule == True + +# - name: Check if rule exists +# command: iptables -C INPUT -s {{ item.source }} -j {{ item.target }} +# register: rule_check +# ignore_errors: yes +# changed_when: false +# with_items: "{{ iptables_rules }}" +# when: +# - add_rule == True + +# - name: Add rule if it doesn't exist +# command: iptables -A INPUT -s {{ item.item.source }} -j {{ item.item.target }} +# with_items: "{{ rule_check.results }}" +# when: +# - item.rc == 1 +# - add_rule == True + +- name: "selinux permissive" + command: "setenforce 0" + ignore_errors: yes + when: + - ansible_facts.os_family == "RedHat" + +- name: "firewalld stop" + systemd: + name: firewalld + state: stopped + enabled: false + ignore_errors: yes + when: + - ansible_facts.os_family == "RedHat" + +- name: Configure ssh root login to {{sshrootlogin}} + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '^(#)?PermitRootLogin.*' + line: 'PermitRootLogin {{sshrootlogin}}' + insertbefore: '^Match.*' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: Remove existing Port lines + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^Port' + state: absent + +- name: SSH Listen on Main Port + lineinfile: + dest: /etc/ssh/sshd_config + insertbefore: '^#*AddressFamily' + line: 'Port {{sshmainport}}' + state: present + owner: root + group: root + mode: 0640 + notify: restart sshd + +- name: "Create sshd_config.d directory" + ansible.builtin.file: + path: "/etc/ssh/sshd_config.d/" + state: directory + recurse: yes + owner: root + group: root + +- name: "Setting sshd allow users" + template: + src: allow_users.j2 + dest: "/etc/ssh/sshd_config.d/allow_users.conf" + notify: restart sshd diff 
--git a/ansible/infra_setting/roles/connect-settings/tasks/01_get_password.yml b/ansible/infra_setting/roles/connect-settings/tasks/01_get_password.yml new file mode 100644 index 0000000..c848fda --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/01_get_password.yml @@ -0,0 +1,36 @@ +--- +- name: get password + command: "{{ role_path }}/files/gen_password {{ encrypt }}" + register: user_password + delegate_to: 127.0.0.1 + when: manual_password is not defined + +- name: get admin password + command: "{{ role_path }}/files/gen_password {{ encrypt }}" + register: admin_password + delegate_to: 127.0.0.1 + when: manual_password is not defined + +- name: set fact user password + block: + - set_fact: + user_password: "{{ user_password.stdout }}" + rescue: + - set_fact: + user_password: "{{ manual_password }}" + always: + - debug: + msg: "{{ username }} : {{ user_password }}" + when: debug_mode == True + +- name: set fact admin password + block: + - set_fact: + admin_password: "{{ admin_password.stdout }}" + rescue: + - set_fact: + admin_password: "{{ manual_password }}" + always: + - debug: + msg: "{{ adminuser }} : {{ admin_password }}" + when: debug_mode == True \ No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/tasks/02_change_password.yml b/ansible/infra_setting/roles/connect-settings/tasks/02_change_password.yml new file mode 100644 index 0000000..64deba0 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/02_change_password.yml @@ -0,0 +1,21 @@ +--- +- include_tasks: 99_decrypt_password.yml + when: + - encrypt == 1 + - manual_password is not defined + +- name: user password change + user: + name: "{{ item }}" + password: "{{ user_password | password_hash('sha512') }}" + state: present + with_items: + - "{{ username }}" + +- name: admin password change + user: + name: "{{ item }}" + password: "{{ admin_password | password_hash('sha512') }}" + state: present + with_items: + - "{{ adminuser }}" \ 
No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/tasks/03_vault.yml b/ansible/infra_setting/roles/connect-settings/tasks/03_vault.yml new file mode 100644 index 0000000..1f3aa95 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/03_vault.yml @@ -0,0 +1,21 @@ +--- +- name: Check if ansible_port is defined + set_fact: + ansible_port: "{{ ansible_port | default(22) }}" + +- debug: + msg: "{{ ansible_distribution }} {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ ansible_port }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + when: debug_mode == True + +- name: put vault + command: "{{ role_path }}/files/vault_put {{ ansible_default_ipv4.address }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + delegate_to: 127.0.0.1 + +- name: get vault + command: "{{ role_path }}/files/vault_get {{ ansible_default_ipv4.address }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password }}" + register: get_vault + delegate_to: 127.0.0.1 + +- debug: + msg: "{{get_vault.stdout_lines}}" + when: debug_mode == True diff --git a/ansible/infra_setting/roles/connect-settings/tasks/04_excel_export.yml b/ansible/infra_setting/roles/connect-settings/tasks/04_excel_export.yml new file mode 100644 index 0000000..cf70b57 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/04_excel_export.yml @@ -0,0 +1,19 @@ +--- +- name: Redirect output to local file + delegate_to: localhost + copy: + content: "[{{ ansible_date_time.date }} {{ ansible_date_time.hour }}:{{ ansible_date_time.minute }}:{{ ansible_date_time.second }}]" + dest: "/tmp/host_list" + mode: '0666' + backup: yes + +- name: Append output to local file + delegate_to: localhost + lineinfile: + path: "/tmp/host_list" + line: "{{ ansible_distribution }} {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ sshmainport }} {{ username }} {{ user_password }} {{ adminuser }} {{ admin_password 
}}" + create: yes + +- name: excel export + command: "{{ role_path }}/files/custom_excel" + delegate_to: 127.0.0.1 diff --git a/ansible/infra_setting/roles/connect-settings/tasks/99_decrypt_password.yml b/ansible/infra_setting/roles/connect-settings/tasks/99_decrypt_password.yml new file mode 100644 index 0000000..164cecc --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/99_decrypt_password.yml @@ -0,0 +1,27 @@ +--- +- name: user_password decrypt + command: "{{ role_path }}/files/decrypt_password {{ user_password }}" + register: user_password + delegate_to: 127.0.0.1 + +- name: admin_password decrypt + command: "{{ role_path }}/files/decrypt_password {{ admin_password }}" + register: admin_password + delegate_to: 127.0.0.1 + when: + - encrypt == 1 + - manual_password is not defined + +- name: admin_password re fact + set_fact: + admin_password: "{{ admin_password.stdout }}" + when: + - encrypt == 1 + - manual_password is not defined + +- name: user_password re fact + set_fact: + user_password: "{{ user_password.stdout }}" + when: + - encrypt == 1 + - manual_password is not defined diff --git a/ansible/infra_setting/roles/connect-settings/tasks/main.yml b/ansible/infra_setting/roles/connect-settings/tasks/main.yml new file mode 100644 index 0000000..82dd567 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- include: 00_host_setting.yml + tags: host + +- include: 01_get_password.yml + tags: password + +- include: 02_change_password.yml + tags: change + +- include: 03_vault.yml + tags: vault + +- include: 04_excel_export.yml + tags: excel \ No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/templates/allow_users.j2 b/ansible/infra_setting/roles/connect-settings/templates/allow_users.j2 new file mode 100755 index 0000000..67c88da --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/templates/allow_users.j2 @@ -0,0 +1,22 @@ +AllowUsers 
dev2-iac@10.10.43.* +AllowUsers *@10.20.142.* +{% if ansible_distribution == "Ubuntu" %} +AllowUsers ubuntu@10.10.43.* +{% endif %} +{% if ansible_distribution == "CentOS" %} +AllowUsers centos@10.10.43.* +{% endif %} +{% if ansible_distribution == "RedHat" %} +AllowUsers redhat@10.10.43.* +{% endif %} + +{% if admin_users is defined %} +{% for user in admin_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} +{% if allow_users is defined %} +{% for user in allow_users %} +AllowUsers {{ user.name }}@{{ user.ip }} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible/infra_setting/roles/connect-settings/tests/inventory b/ansible/infra_setting/roles/connect-settings/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible/infra_setting/roles/connect-settings/tests/test.yml b/ansible/infra_setting/roles/connect-settings/tests/test.yml new file mode 100644 index 0000000..c604954 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - password diff --git a/ansible/infra_setting/roles/connect-settings/vars/main.yml b/ansible/infra_setting/roles/connect-settings/vars/main.yml new file mode 100644 index 0000000..1392b01 --- /dev/null +++ b/ansible/infra_setting/roles/connect-settings/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for password diff --git a/ansible/teleport_setting/ansible.cfg b/ansible/teleport_setting/ansible.cfg new file mode 100755 index 0000000..0ebf722 --- /dev/null +++ b/ansible/teleport_setting/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +inventory = inventory +roles_path = roles +deprecation_warnings = False +display_skipped_hosts = no +ansible_home = . 
+stdout_callback = debug +host_key_checking=False +#private_key_file=/root/.ssh/dev2-iac +#remote_tmp = /tmp/.ansible/tmp diff --git a/ansible/teleport_setting/restart.yml b/ansible/teleport_setting/restart.yml new file mode 100644 index 0000000..b0e0721 --- /dev/null +++ b/ansible/teleport_setting/restart.yml @@ -0,0 +1,10 @@ +--- +- name: "restart" + hosts: all + become: yes + tasks: + - name: Restart teleport service + ansible.builtin.systemd: + name: teleport + enabled: true + state: restarted diff --git a/ansible/teleport_setting/roles/.DS_Store b/ansible/teleport_setting/roles/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..9b0c2bb765696061bcc1e2ee0d027b27a852529b GIT binary patch literal 6148 zcmeHK%}T>S5Z-O8-BN@e6a+5|UMn`mzv3m-`T|Dupi&cBG#IlbP0gVc@&fuozK74_ z%VIE*zIV*(VhRDf*+`SDBak>15pX zlPjE^D-{KE+Yc_nVb*V~pQt$X!+4nJf^aZ|lAAj;v8PUC~WP?OX5hy zcMzM`&GQL~0b+m{SQ-ZG0VwKAvt!yQF+dFblmR>+Bq*Y5F*m4>4jAzF5#tp^6tM9w zfhaAy7IT9T0pX?;(3Em}#o(qK{L<#R7ITB9oN>J}Jjbrg?G1(N)xj@yI^(WE8i@g7 zV3C2cnKrQhpMHP;Uo4^-F+dFbD+YL_=k>aS5Z>*NO({YT3WApfuN9l(FJ3~dFJMFuDm5WRLu0lysX3HFUO->S_wad~ z+1)^k#iNLwf!%L*es(h-WPccA+?|FtV{OKm1r3p-vO>_j(p53Rh+NH)26;LTVi~NM z=5Lzt+v_Z2K1vlhRrCO_RG^~czvL3u=nR}U^O;gvO+~Me2$~Y)> zKe&m8`JlaZA(PCHlHo)pMBxxZ?r)$y!aFDnMZJ2HB$mk&Sj&tul8_i6 z28aP-U=0~Chk?~v!`i82Vt^Ry()(U8k&`>b0Km`Q!rAq(|a39%GPVE<{ aL!4u<(1^33U8MukML-ck9Wn3=4159o+)Jte literal 0 HcmV?d00001 diff --git a/ansible/teleport_setting/roles/teleport/README.md b/ansible/teleport_setting/roles/teleport/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/ansible/teleport_setting/roles/teleport/defaults/main.yml b/ansible/teleport_setting/roles/teleport/defaults/main.yml new file mode 100644 index 0000000..79506a1 --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# defaults file for teleport +teleport_uri: teleport.kr.datasaker.io +teleport_version: 13.3.8 +remove: False +update: False +install: False +custom_labels: [] + diff --git a/ansible/teleport_setting/roles/teleport/handlers/main.yml b/ansible/teleport_setting/roles/teleport/handlers/main.yml new file mode 100644 index 0000000..4b32df4 --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Reload systemd configuration + ansible.builtin.systemd: + daemon_reload: True + +- name: Restart teleport service + ansible.builtin.systemd: + name: teleport + enabled: true + state: restarted \ No newline at end of file diff --git a/ansible/teleport_setting/roles/teleport/meta/main.yml b/ansible/teleport_setting/roles/teleport/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/teleport_setting/roles/teleport/tasks/main.yml b/ansible/teleport_setting/roles/teleport/tasks/main.yml new file mode 100644 index 0000000..8fd44f0 --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/tasks/main.yml @@ -0,0 +1,33 @@ +- name: "Create temporary directory for key manipulation" + tempfile: + state: directory + suffix: keys + register: tempdir + when: + - install == True or update == True + - remove == False + +- name: "Include Teleport Agent Install" + include_tasks: teleport_install.yml + tags: install + when: + - install == True + +- name: "Include Teleport Agent update" + include_tasks: teleport_update.yml + tags: remove + when: + - update == True + +- name: "Remove temporary directory for key manipulation" + file: + path: "{{ tempdir.path }}" + state: absent + when: + - install == True or update == True + +- name: "Include Teleport Agent remove" + include_tasks: teleport_remove.yml + tags: remove + when: + - remove == True diff --git a/ansible/teleport_setting/roles/teleport/tasks/teleport_install.yml b/ansible/teleport_setting/roles/teleport/tasks/teleport_install.yml new file 
# file: ansible/teleport_setting/roles/teleport/tasks/teleport_install.yml
---
# Obtain a fresh join token + CA pin from the auth server (run on the control
# node, where tctl is configured), render the installer script into the
# role-created tempdir, and execute it on the target host.
- name: Run tctl nodes add and capture the output
  ansible.builtin.command: tctl nodes add
  register: tctl_output
  changed_when: false
  delegate_to: 127.0.0.1

- name: Extract token and ca_pin
  ansible.builtin.set_fact:
    # regex_search with a capture-group argument returns a list; [0] is the match
    get_join_token: "{{ (tctl_output.stdout | regex_search('--token=(\\S+)', '\\1'))[0] }}"
    get_ca_pin: "{{ (tctl_output.stdout | regex_search('--ca-pin=(\\S+)', '\\1'))[0] }}"

- name: Debug extracted values
  ansible.builtin.debug:
    msg:
      - "join_token: {{ get_join_token }}"
      - "ca_pin: {{ get_ca_pin }}"

- name: Create Teleport install script
  ansible.builtin.template:
    src: install-node.sh.j2
    dest: "{{ tempdir.path }}/install-node.sh"
    # the rendered script embeds the join token — keep it owner-only
    mode: "0700"

- name: Run Teleport Install Script
  ansible.builtin.command: "bash {{ tempdir.path }}/install-node.sh"

# file: ansible/teleport_setting/roles/teleport/tasks/teleport_remove.yml
---
# Uninstall the teleport package, delete its state/config/binaries, and kill
# any leftover processes.
- name: Remove Teleport on RedHat-based systems
  ansible.builtin.yum:
    name: teleport
    state: absent
  when: ansible_os_family == "RedHat"

- name: Remove Teleport on Debian-based systems
  ansible.builtin.apt:
    name: teleport
    state: absent
  when: ansible_os_family == "Debian"

- name: Remove Teleport directories and files
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/teleport
    - /etc/teleport.yaml
    - /usr/local/bin/teleport
    - /usr/local/bin/tctl
    - /usr/local/bin/tsh

- name: Kill Teleport processes
  ansible.builtin.command: pkill -9 teleport
  register: pkill_result
  # pkill rc=1 just means "no matching process" — that is success for removal;
  # only treat other return codes as real failures (was a blanket ignore_errors)
  changed_when: pkill_result.rc == 0
  failed_when: pkill_result.rc not in [0, 1]
# file: ansible/teleport_setting/roles/teleport/tasks/teleport_update.yml
---
# Re-read the existing join token / CA pin from the current config, re-render
# /etc/teleport.yaml, then upgrade the package and restart the service.

- name: Read existing join token from teleport.yaml
  # single awk invocation — the original "cat | grep | awk" shell pipeline
  # needs neither the pipe nor a shell
  ansible.builtin.command: "awk '/token_name:/ {print $2}' /etc/teleport.yaml"
  register: token_output
  changed_when: false
  # tolerate a missing/unreadable config; stdout is simply empty then
  failed_when: false

- name: Read existing ca_pin from teleport.yaml
  ansible.builtin.command: "awk '/ca_pin:/ {print $2}' /etc/teleport.yaml"
  register: ca_output
  changed_when: false
  failed_when: false

- name: Extract token and ca_pin
  ansible.builtin.set_fact:
    get_join_token: "{{ token_output.stdout }}"
    get_ca_pin: "{{ ca_output.stdout }}"

- name: Debug extracted values
  ansible.builtin.debug:
    msg:
      - "join_token: {{ get_join_token }}"
      - "ca_pin: {{ get_ca_pin }}"

- name: Update Teleport yaml
  ansible.builtin.template:
    src: teleport.yaml.j2
    dest: /etc/teleport.yaml

- name: Update Teleport on RedHat-based systems
  ansible.builtin.yum:
    name: teleport
    state: latest
  when: ansible_os_family == "RedHat"
  notify:
    - Reload systemd configuration
    - Restart teleport service

- name: Update Teleport on Debian-based systems
  ansible.builtin.apt:
    name: teleport
    state: latest
  when: ansible_os_family == "Debian"
  notify:
    - Reload systemd configuration
    - Restart teleport service
+SYSTEMD_UNIT_PATH="/lib/systemd/system/teleport.service" +TARGET_PORT_DEFAULT=443 +TELEPORT_ARCHIVE_PATH='teleport' +TELEPORT_BINARY_DIR="/usr/local/bin" +TELEPORT_BINARY_LIST="teleport tctl tsh" +TELEPORT_CONFIG_PATH="/etc/teleport.yaml" +TELEPORT_DATA_DIR="/var/lib/teleport" +TELEPORT_DOCS_URL="https://goteleport.com/docs/" +TELEPORT_FORMAT="" + +# initialise variables (because set -u disallows unbound variables) +f="" +l="" +DISABLE_TLS_VERIFICATION=false +NODENAME=$(hostname) +IGNORE_CHECKS=false +OVERRIDE_FORMAT="" +QUIET=false +APP_INSTALL_DECISION="" +INTERACTIVE=false + +# the default value of each variable is a templatable Go value so that it can +# optionally be replaced by the server before the script is served up +TELEPORT_VERSION='{{ teleport_version }}' +TELEPORT_PACKAGE_NAME='teleport' +REPO_CHANNEL='' +TARGET_HOSTNAME='{{ teleport_uri }}' +TARGET_PORT='443' +JOIN_TOKEN='{{ get_join_token }}' +JOIN_METHOD='' +JOIN_METHOD_FLAG="" +[ -n "$JOIN_METHOD" ] && JOIN_METHOD_FLAG="--join-method ${JOIN_METHOD}" + +# inject labels into the configuration +# LABELS='teleport.internal/resource-id=0ec993a8-b1ec-4fa6-8fc5-4e73e3e5306e','env=localhost' +LABELS='ipaddr={{ansible_default_ipv4.address}},group={{ group_names[-1] }},os={{ ansible_distribution }}{% if custom_labels %},{{ custom_labels }}{% endif %}' +LABELS_FLAG=() +[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}") + +LABELS_FLAG=() +[ -n "$LABELS" ] && LABELS_FLAG=(--labels "${LABELS}") + +# When all stanza generators have been updated to use the new +# `teleport configure` commands CA_PIN_HASHES can be removed along +# with the script passing it in in `join_tokens.go`. 
+CA_PIN_HASHES='{{ get_ca_pin }}' +CA_PINS='{{ get_ca_pin }}' +ARG_CA_PIN_HASHES="" +APP_INSTALL_MODE='false' +APP_NAME='' +APP_URI='' +DB_INSTALL_MODE='false' + +# usage message +# shellcheck disable=SC2086 +usage() { echo "Usage: $(basename $0) [-v teleport_version] [-h target_hostname] [-p target_port] [-j join_token] [-c ca_pin_hash]... [-q] [-l log_filename] [-a app_name] [-u app_uri] " 1>&2; exit 1; } +while getopts ":v:h:p:j:c:f:ql:ika:u:" o; do + case "${o}" in + v) TELEPORT_VERSION=${OPTARG};; + h) TARGET_HOSTNAME=${OPTARG};; + p) TARGET_PORT=${OPTARG};; + j) JOIN_TOKEN=${OPTARG};; + c) ARG_CA_PIN_HASHES="${ARG_CA_PIN_HASHES} ${OPTARG}";; + f) f=${OPTARG}; if [[ ${f} != "tarball" && ${f} != "deb" && ${f} != "rpm" ]]; then usage; fi;; + q) QUIET=true;; + l) l=${OPTARG};; + i) IGNORE_CHECKS=true; COPY_COMMAND="cp -f";; + k) DISABLE_TLS_VERIFICATION=true;; + a) APP_INSTALL_MODE=true && APP_NAME=${OPTARG};; + u) APP_INSTALL_MODE=true && APP_URI=${OPTARG};; + *) usage;; + esac +done +shift $((OPTIND-1)) + +if [[ "${ARG_CA_PIN_HASHES}" != "" ]]; then + CA_PIN_HASHES="${ARG_CA_PIN_HASHES}" +fi + +# function to construct a go template variable +# go's template parser is a bit finicky, so we dynamically build the value one character at a time +construct_go_template() { + OUTPUT="{" + OUTPUT+="{" + OUTPUT+="." + OUTPUT+="${1}" + OUTPUT+="}" + OUTPUT+="}" + echo "${OUTPUT}" +} + +# check whether we are root, exit if not +assert_running_as_root() { + if ! [ "$(id -u)" = 0 ]; then + echo "This script must be run as root." 1>&2 + exit 1 + fi +} + +# function to check whether variables are either blank or set to the default go template value +# (because they haven't been set by the go script generator or a command line argument) +# returns 1 if the variable is set to a default/zero value +# returns 0 otherwise (i.e. 
it needs to be set interactively) +check_variable() { + VARIABLE_VALUE="${!1}" + GO_TEMPLATE_NAME=$(construct_go_template "${2}") + if [[ "${VARIABLE_VALUE}" == "" ]] || [[ "${VARIABLE_VALUE}" == "${GO_TEMPLATE_NAME}" ]]; then + return 1 + fi + return 0 +} + +# function to check whether a provided value is "truthy" i.e. it looks like you're trying to say "yes" +is_truthy() { + declare -a TRUTHY_VALUES + TRUTHY_VALUES=("y" "Y" "yes" "YES" "ye" "YE" "yep" "YEP" "ya" "YA") + CHECK_VALUE="$1" + for ARRAY_VALUE in "${TRUTHY_VALUES[@]}"; do [[ "${CHECK_VALUE}" == "${ARRAY_VALUE}" ]] && return 0; done + return 1 +} + +# function to read input until the value you get is non-empty +read_nonblank_input() { + INPUT="" + VARIABLE_TO_ASSIGN="$1" + shift + PROMPT="$*" + until [[ "${INPUT}" != "" ]]; do + echo -n "${PROMPT}" + read -r INPUT + done + printf -v "${VARIABLE_TO_ASSIGN}" '%s' "${INPUT}" +} + +# error if we're not root +assert_running_as_root + +# set/read values interactively if not provided +# users will be prompted to enter their own value if all the following are true: +# - the current value is blank, or equal to the default Go template value +# - the value has not been provided by command line argument +! check_variable TELEPORT_VERSION version && INTERACTIVE=true && read_nonblank_input TELEPORT_VERSION "Enter Teleport version to install (without v): " +! check_variable TARGET_HOSTNAME hostname && INTERACTIVE=true && read_nonblank_input TARGET_HOSTNAME "Enter target hostname to connect to: " +! check_variable TARGET_PORT port && INTERACTIVE=true && { echo -n "Enter target port to connect to [${TARGET_PORT_DEFAULT}]: "; read -r TARGET_PORT; } +! check_variable JOIN_TOKEN token && INTERACTIVE=true && read_nonblank_input JOIN_TOKEN "Enter Teleport join token as provided: " +! 
check_variable CA_PIN_HASHES caPins && INTERACTIVE=true && read_nonblank_input CA_PIN_HASHES "Enter CA pin hash (separate multiple hashes with spaces): " +[ -n "${f}" ] && OVERRIDE_FORMAT=${f} +[ -n "${l}" ] && LOG_FILENAME=${l} +# if app service mode is not set (or is the default value) and we are running interactively (i.e. the user has provided some input already), +# prompt the user to choose whether to enable app_service +if [[ "${INTERACTIVE}" == "true" ]]; then + if ! check_variable APP_INSTALL_MODE appInstallMode; then + APP_INSTALL_MODE="false" + echo -n "Would you like to enable and configure Teleport's app_service, to use Teleport as a reverse proxy for a web application? [y/n, default: n] " + read -r APP_INSTALL_DECISION + if is_truthy "${APP_INSTALL_DECISION}"; then + APP_INSTALL_MODE="true" + fi + fi +fi +# prompt for extra needed values if we're running in app service mode +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + ! check_variable APP_NAME appName && read_nonblank_input APP_NAME "Enter app name to install (must be DNS-compatible; less than 63 characters, no spaces, only - or _ as punctuation): " + ! 
check_variable APP_URI appURI && read_nonblank_input APP_URI "Enter app URI (the host running the Teleport app service must be able to connect to this): " + # generate app public addr by concatenating values + APP_PUBLIC_ADDR="${APP_NAME}.${TARGET_HOSTNAME}" +fi + +# set default target port if value not provided +if [[ "${TARGET_PORT}" == "" ]]; then + TARGET_PORT=${TARGET_PORT_DEFAULT} +fi + +# clear log file if provided +if [[ "${LOG_FILENAME}" != "" ]]; then + if [ -f "${LOG_FILENAME}" ]; then + echo -n "" > "${LOG_FILENAME}" + fi +fi + +# log functions +log_date() { echo -n "$(date '+%Y-%m-%d %H:%M:%S %Z')"; } +log() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] $*" + if [[ ${QUIET} != "true" ]]; then + echo "${LOG_LINE}" + fi + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line with no timestamp or starting data, always prints +log_only() { + LOG_LINE="$*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# writes a line by itself as a header +log_header() { + LOG_LINE="$*" + echo "" + echo "${LOG_LINE}" + echo "" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +# important log lines, print even when -q (quiet) is passed +log_important() { + LOG_LINE="$(log_date) [${SCRIPT_NAME}] ---> $*" + echo "${LOG_LINE}" + if [[ "${LOG_FILENAME}" != "" ]]; then + echo "${LOG_LINE}" >> "${LOG_FILENAME}" + fi +} +log_cleanup_message() { + log_only "This script does not overwrite any existing settings or Teleport installations." 
+ log_only "Please clean up by running any of the following steps as necessary:" + log_only "- stop any running Teleport processes" + log_only " - pkill -f teleport" + log_only "- remove any data under ${TELEPORT_DATA_DIR}, along with the directory itself" + log_only " - rm -rf ${TELEPORT_DATA_DIR}" + log_only "- remove any configuration at ${TELEPORT_CONFIG_PATH}" + log_only " - rm -f ${TELEPORT_CONFIG_PATH}" + log_only "- remove any Teleport binaries (${TELEPORT_BINARY_LIST}) installed under ${TELEPORT_BINARY_DIR}" + for BINARY in ${TELEPORT_BINARY_LIST}; do EXAMPLE_DELETE_COMMAND+="${TELEPORT_BINARY_DIR}/${BINARY} "; done + log_only " - rm -f ${EXAMPLE_DELETE_COMMAND}" + log_only "Run this installer again when done." + log_only +} + +# other functions +# check whether a named program exists +check_exists() { NAME=$1; if type "${NAME}" >/dev/null 2>&1; then return 0; else return 1; fi; } +# checks for the existence of a list of named binaries and exits with error if any of them don't exist +check_exists_fatal() { + for TOOL in "$@"; do + if ! 
check_exists "${TOOL}"; then + log_important "Error: cannot find ${TOOL} - it needs to be installed" + exit 1 + fi + done +} +# check connectivity to the given host/port and make a request to see if Teleport is listening +# uses the global variable CONNECTIVITY_TEST_METHOD to return the name of the checker, as return +# values aren't really a thing that exists in bash +check_connectivity() { + HOST=$1 + PORT=$2 + # check with nc + if check_exists nc; then + CONNECTIVITY_TEST_METHOD="nc" + if nc -z -w3 "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc, check with telnet + elif check_exists telnet; then + CONNECTIVITY_TEST_METHOD="telnet" + if echo -e '\x1dclose\x0d' | telnet "${HOST}" "${PORT}" >/dev/null 2>&1; then return 0; else return 1; fi + # if there's no nc or telnet, try and use /dev/tcp + elif [ -f /dev/tcp ]; then + CONNECTIVITY_TEST_METHOD="/dev/tcp" + if (head -1 < "/dev/tcp/${HOST}/${PORT}") >/dev/null 2>&1; then return 0; else return 1; fi + else + return 255 + fi +} +# check whether a teleport DEB is already installed and exit with error if so +check_deb_not_already_installed() { + check_exists_fatal dpkg awk + DEB_INSTALLED=$(dpkg -l | awk '{print $2}' | grep -E ^teleport || true) + if [[ ${DEB_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport DEB package installed (name: ${DEB_INSTALLED})." + log_important "You will need to remove that package before using this script." + exit 1 + fi +} +# check whether a teleport RPM is already installed and exit with error if so +check_rpm_not_already_installed() { + check_exists_fatal rpm + RPM_INSTALLED=$(rpm -qa | grep -E ^teleport || true) + if [[ ${RPM_INSTALLED} != "" ]]; then + log_important "It looks like there is already a Teleport RPM package installed (name: ${RPM_INSTALLED})." + log_important "You will need to remove that package before using this script." 
+ exit 1 + fi +} +# function to check if given variable is set +check_set() { + CHECK_KEY=${1} || true + CHECK_VALUE=${!1} || true + if [[ "${CHECK_VALUE}" == "" ]]; then + log "Required variable ${CHECK_KEY} is not set" + exit 1 + else + log "${CHECK_KEY}: ${CHECK_VALUE}" + fi +} +# checks that teleport binary can be found in path and runs 'teleport version' +check_teleport_binary() { + FOUND_TELEPORT_VERSION=$(${TELEPORT_BINARY_DIR}/teleport version) + if [[ "${FOUND_TELEPORT_VERSION}" == "" ]]; then + log "Cannot find Teleport binary" + return 1 + else + log "Found: ${FOUND_TELEPORT_VERSION}"; + return 0 + fi +} +# wrapper to download with curl +download() { + URL=$1 + OUTPUT_PATH=$2 + CURL_COMMAND="curl -fsSL --retry 5 --retry-delay 5" + # optionally allow disabling of TLS verification (can be useful on older distros + # which often have an out-of-date set of CA certificate bundle which won't validate) + if [[ ${DISABLE_TLS_VERIFICATION} == "true" ]]; then + CURL_COMMAND+=" -k" + fi + log "Running ${CURL_COMMAND} ${URL}" + log "Downloading to ${OUTPUT_PATH}" + # handle errors with curl + if ! ${CURL_COMMAND} -o "${OUTPUT_PATH}" "${URL}"; then + log_important "curl error downloading ${URL}" + log "On an older OS, this may be related to the CA certificate bundle being too old." + log "You can pass the hidden -k flag to this script to disable TLS verification - this is not recommended!" + exit 1 + fi + # check that the file has a non-zero size as an extra validation + check_exists_fatal wc xargs + FILE_SIZE="$(wc -c <"${OUTPUT_PATH}" | xargs)" + if [ "${FILE_SIZE}" -eq 0 ]; then + log_important "The downloaded file has a size of 0 bytes, which means an error occurred. Cannot continue." 
+ exit 1 + else + log "Downloaded file size: ${FILE_SIZE} bytes" + fi + # if we have a hashing utility installed, also download and validate the checksum + SHA_COMMAND="" + # shasum is installed by default on MacOS and some distros + if check_exists shasum; then + SHA_COMMAND="shasum -a 256" + # sha256sum is installed by default in some other distros + elif check_exists sha256sum; then + SHA_COMMAND="sha256sum" + fi + if [[ "${SHA_COMMAND}" != "" ]]; then + log "Will use ${SHA_COMMAND} to validate the checksum of the downloaded file" + SHA_URL="${URL}.sha256" + SHA_PATH="${OUTPUT_PATH}.sha256" + ${CURL_COMMAND} -o "${SHA_PATH}" "${SHA_URL}" + if ${SHA_COMMAND} --status -c "${SHA_PATH}"; then + log "The downloaded file's checksum validated correctly" + else + SHA_EXPECTED=$(cat "${SHA_PATH}") + SHA_ACTUAL=$(${SHA_COMMAND} "${OUTPUT_PATH}") + if check_exists awk; then + SHA_EXPECTED=$(echo "${SHA_EXPECTED}" | awk '{print $1}') + SHA_ACTUAL=$(echo "${SHA_ACTUAL}" | awk '{print $1}') + fi + log_important "Checksum of the downloaded file did not validate correctly" + log_important "Expected: ${SHA_EXPECTED}" + log_important "Got: ${SHA_ACTUAL}" + log_important "Try rerunning this script from the start. If the issue persists, contact Teleport support." 
+ exit 1 + fi + else + log "shasum/sha256sum utilities not found, will skip checksum validation" + fi +} +# gets the filename from a full path (https://target.site/path/to/file.tar.gz -> file.tar.gz) +get_download_filename() { echo "${1##*/}"; } +# gets the pid of any running teleport process (and converts newlines to spaces) +get_teleport_pid() { + check_exists_fatal pgrep xargs + pgrep teleport | xargs echo +} +# returns a command which will start teleport using the config +get_teleport_start_command() { + echo "${TELEPORT_BINARY_DIR}/teleport start --config=${TELEPORT_CONFIG_PATH}" +} +# installs the teleport-provided launchd config +install_launchd_config() { + log "Installing Teleport launchd config to ${LAUNCHD_CONFIG_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/launchd/com.goteleport.teleport.plist ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist +} +# installs the teleport-provided systemd unit +install_systemd_unit() { + log "Installing Teleport systemd unit to ${SYSTEMD_UNIT_PATH}" + ${COPY_COMMAND} ./${TELEPORT_ARCHIVE_PATH}/examples/systemd/teleport.service ${SYSTEMD_UNIT_PATH} + log "Reloading unit files (systemctl daemon-reload)" + systemctl daemon-reload +} +# formats the arguments as a yaml list +get_yaml_list() { + name="${1}" + list="${2}" + indentation="${3}" + echo "${indentation}${name}:" + for item in ${list}; do + echo "${indentation}- ${item}" + done +} + +# installs the provided teleport config (for app service) +install_teleport_app_config() { + log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +app_service: + enabled: yes + apps: 
+ - name: "${APP_NAME}" + uri: "${APP_URI}" + public_addr: ${APP_PUBLIC_ADDR} +EOF +} +# installs the provided teleport config (for database service) +install_teleport_database_config() { + log "Writing Teleport database service config to ${TELEPORT_CONFIG_PATH}" + CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + + # This file is processed by `shellschek` as part of the lint step + # It detects an issue because of un-set variables - $index and $line. This check is called SC2154. + # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it. + # When executing the script, those are no long variables but actual values. + # shellcheck disable=SC2154 + cat << EOF > ${TELEPORT_CONFIG_PATH} +version: v3 +teleport: + nodename: ${NODENAME} + auth_token: ${JOIN_TOKEN} +${CA_PINS_CONFIG} + proxy_server: ${TARGET_HOSTNAME}:${TARGET_PORT} + log: + output: stderr + severity: INFO +auth_service: + enabled: no +ssh_service: + enabled: no +proxy_service: + enabled: no +db_service: + enabled: "yes" + resources: + - labels: +EOF +} +# installs the provided teleport config (for node service) +install_teleport_node_config() { + log "Writing Teleport node service config to ${TELEPORT_CONFIG_PATH}" + ${TELEPORT_BINARY_DIR}/teleport node configure \ + --token ${JOIN_TOKEN} \ + ${JOIN_METHOD_FLAG} \ + --ca-pin ${CA_PINS} \ + --proxy ${TARGET_HOSTNAME}:${TARGET_PORT} \ + "${LABELS_FLAG[@]}" \ + --output ${TELEPORT_CONFIG_PATH} +} +# checks whether the given host is running MacOS +is_macos_host() { if [[ ${OSTYPE} == "darwin"* ]]; then return 0; else return 1; fi } +# checks whether teleport is already running on the host +is_running_teleport() { + check_exists_fatal pgrep + TELEPORT_PID=$(get_teleport_pid) + if [[ "${TELEPORT_PID}" != "" ]]; then return 0; else return 1; fi +} +# checks whether the given host is running systemd as its init system +is_using_systemd() { if [ -d /run/systemd/system ]; then return 0; else 
return 1; fi } +# prints a warning if the host isn't running systemd +no_systemd_warning() { + log_important "This host is not running systemd, so Teleport cannot be started automatically when it exits." + log_important "Please investigate an alternative way to keep Teleport running." + log_important "You can find information in our documentation: ${TELEPORT_DOCS_URL}" + log_important "For now, Teleport will be started in the foreground - you can press Ctrl+C to exit." + log_only + log_only "Run this command to start Teleport in future:" + log_only "$(get_teleport_start_command)" + log_only + log_only "------------------------------------------------------------------------" + log_only "| IMPORTANT: TELEPORT WILL STOP RUNNING AFTER YOU CLOSE THIS TERMINAL! |" + log_only "| YOU MUST CONFIGURE A SERVICE MANAGER TO MAKE IT RUN ON STARTUP! |" + log_only "------------------------------------------------------------------------" + log_only +} +# print a message giving the name of the node and a link to the docs +# gives some debugging instructions if the service didn't start successfully +print_welcome_message() { + log_only "" + if is_running_teleport; then + log_only "Teleport has been started." 
+ log_only "" + if is_using_systemd; then + log_only "View its status with 'sudo systemctl status teleport.service'" + log_only "View Teleport logs using 'sudo journalctl -u teleport.service'" + log_only "To stop Teleport, run 'sudo systemctl stop teleport.service'" + log_only "To start Teleport again if you stop it, run 'sudo systemctl start teleport.service'" + elif is_macos_host; then + log_only "View Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + log_only "To stop Teleport, run 'sudo launchctl unload ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + log_only "To start Teleport again if you stop it, run 'sudo launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist'" + fi + log_only "" + log_only "You can see this node connected in the Teleport web UI or 'tsh ls' with the name '${NODENAME}'" + log_only "Find more details on how to use Teleport here: https://goteleport.com/docs/user-manual/" + else + log_important "The Teleport service was installed, but it does not appear to have started successfully." + if is_using_systemd; then + log_important "Check the Teleport service's status with 'systemctl status teleport.service'" + log_important "View Teleport logs with 'journalctl -u teleport.service'" + elif is_macos_host; then + log_important "Check Teleport logs in '${MACOS_STDERR_LOG}' and '${MACOS_STDOUT_LOG}'" + fi + log_important "Contact Teleport support for further assistance." + fi + log_only "" +} +# start teleport in foreground (when there's no systemd) +start_teleport_foreground() { + log "Starting Teleport in the foreground" + # shellcheck disable=SC2091 + $(get_teleport_start_command) +} +# start teleport via launchd (after installing config) +start_teleport_launchd() { + log "Starting Teleport via launchctl. It will automatically be started whenever the system reboots." 
+ launchctl load ${LAUNCHD_CONFIG_PATH}/com.goteleport.teleport.plist + sleep ${ALIVE_CHECK_DELAY} +} +# start teleport via systemd (after installing unit) +start_teleport_systemd() { + log "Starting Teleport via systemd. It will automatically be started whenever the system reboots." + systemctl enable teleport.service + systemctl start teleport.service + sleep ${ALIVE_CHECK_DELAY} +} +# checks whether teleport binaries exist on the host +teleport_binaries_exist() { + for BINARY_NAME in teleport tctl tsh; do + if [ -f ${TELEPORT_BINARY_DIR}/${BINARY_NAME} ]; then return 0; else return 1; fi + done +} +# checks whether a teleport config exists on the host +teleport_config_exists() { if [ -f ${TELEPORT_CONFIG_PATH} ]; then return 0; else return 1; fi; } +# checks whether a teleport data dir exists on the host +teleport_datadir_exists() { if [ -d ${TELEPORT_DATA_DIR} ]; then return 0; else return 1; fi; } + +# error out if any required values are not set +check_set TELEPORT_VERSION +check_set TARGET_HOSTNAME +check_set TARGET_PORT +check_set JOIN_TOKEN +check_set CA_PIN_HASHES +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + check_set APP_NAME + check_set APP_URI + check_set APP_PUBLIC_ADDR +fi + +### +# main script starts here +### +# check connectivity to teleport server/port +if [[ "${IGNORE_CONNECTIVITY_CHECK}" == "true" ]]; then + log "TELEPORT_IGNORE_CONNECTIVITY_CHECK=true, not running connectivity check" +else + log "Checking TCP connectivity to Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT})" + if ! 
check_connectivity "${TARGET_HOSTNAME}" "${TARGET_PORT}"; then + # if we don't have a connectivity test method assigned, we know we couldn't run the test + if [[ ${CONNECTIVITY_TEST_METHOD} == "" ]]; then + log "Couldn't find nc, telnet or /dev/tcp to do a connection test" + log "Going to blindly continue without testing connectivity" + else + log_important "Couldn't open a connection to the Teleport server (${TARGET_HOSTNAME}:${TARGET_PORT}) via ${CONNECTIVITY_TEST_METHOD}" + log_important "This issue will need to be fixed before the script can continue." + log_important "If you think this is an error, add 'export TELEPORT_IGNORE_CONNECTIVITY_CHECK=true && ' before the curl command which runs the script." + exit 1 + fi + else + log "Connectivity to Teleport server (via ${CONNECTIVITY_TEST_METHOD}) looks good" + fi +fi + +# use OSTYPE variable to figure out host type/arch +if [[ "${OSTYPE}" == "linux-gnu"* ]]; then + # linux host, now detect arch + TELEPORT_BINARY_TYPE="linux" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "armv7l" ]]; then + TELEPORT_ARCH="arm" + elif [[ ${ARCH} == "aarch64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + elif [[ ${ARCH} == "i686" ]]; then + TELEPORT_ARCH="386" + else + log_important "Error: cannot detect architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected arch: ${ARCH}, using Teleport arch ${TELEPORT_ARCH}" + # if the download format is already set, we have no need to detect distro + if [[ ${TELEPORT_FORMAT} == "" ]]; then + # detect distro + # if /etc/os-release doesn't exist, we need to use some other logic + if [ ! 
-f /etc/os-release ]; then + if [ -f /etc/centos-release ]; then + if grep -q 'CentOS release 6' /etc/centos-release; then + log_important "Detected host type: CentOS 6 [$(cat /etc/centos-release)]" + log_important "Teleport will not work on CentOS 6 -based servers due to the glibc version being too low." + exit 1 + fi + elif [ -f /etc/redhat-release ]; then + if grep -q 'Red Hat Enterprise Linux Server release 5' /etc/redhat-release; then + log_important "Detected host type: RHEL5 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL5-based servers due to the glibc version being too low." + exit 1 + elif grep -q 'Red Hat Enterprise Linux Server release 6' /etc/redhat-release; then + log_important "Detected host type: RHEL6 [$(cat /etc/redhat-release)]" + log_important "Teleport will not work on RHEL6-based servers due to the glibc version being too low." + exit 1 + fi + fi + # use ID_LIKE value from /etc/os-release (if set) + # this is 'debian' on ubuntu/raspbian, 'centos rhel fedora' on amazon linux etc + else + check_exists_fatal cut + DISTRO_TYPE=$(grep ID_LIKE /etc/os-release | cut -d= -f2) || true + if [[ ${DISTRO_TYPE} == "" ]]; then + # use exact ID value from /etc/os-release if ID_LIKE is not set + DISTRO_TYPE=$(grep -w ID /etc/os-release | cut -d= -f2) + fi + if [[ ${DISTRO_TYPE} =~ "debian" ]]; then + TELEPORT_FORMAT="deb" + elif [[ "$DISTRO_TYPE" =~ "amzn"* ]] || [[ ${DISTRO_TYPE} =~ "centos"* ]] || [[ ${DISTRO_TYPE} =~ "rhel" ]] || [[ ${DISTRO_TYPE} =~ "fedora"* ]]; then + TELEPORT_FORMAT="rpm" + else + log "Couldn't match a distro type using /etc/os-release, falling back to tarball installer" + TELEPORT_FORMAT="tarball" + fi + fi + log "Detected distro type: ${DISTRO_TYPE}" + #suse, also identified as sles, uses a different path for its systemd then other distro types like ubuntu + if [[ ${DISTRO_TYPE} =~ "suse"* ]] || [[ ${DISTRO_TYPE} =~ "sles"* ]]; then + SYSTEMD_UNIT_PATH="/etc/systemd/system/teleport.service" + fi + fi 
+elif [[ "${OSTYPE}" == "darwin"* ]]; then + # macos host, now detect arch + TELEPORT_BINARY_TYPE="darwin" + ARCH=$(uname -m) + log "Detected host: ${OSTYPE}, using Teleport binary type ${TELEPORT_BINARY_TYPE}" + if [[ ${ARCH} == "arm64" ]]; then + TELEPORT_ARCH="arm64" + elif [[ ${ARCH} == "x86_64" ]]; then + TELEPORT_ARCH="amd64" + else + log_important "Error: unsupported architecture from uname -m: ${ARCH}" + exit 1 + fi + log "Detected MacOS ${ARCH} architecture, using Teleport arch ${TELEPORT_ARCH}" + TELEPORT_FORMAT="tarball" +else + log_important "Error - unsupported platform: ${OSTYPE}" + exit 1 +fi +log "Using Teleport distribution: ${TELEPORT_FORMAT}" + +# create temporary directory and exit cleanup logic +TEMP_DIR=$(mktemp -d -t teleport-XXXXXXXXXX) +log "Created temp dir ${TEMP_DIR}" +pushd "${TEMP_DIR}" >/dev/null 2>&1 + +finish() { + popd >/dev/null 2>&1 + rm -rf "${TEMP_DIR}" +} +trap finish EXIT + +# optional format override (mostly for testing) +if [[ ${OVERRIDE_FORMAT} != "" ]]; then + TELEPORT_FORMAT="${OVERRIDE_FORMAT}" + log "Overriding TELEPORT_FORMAT to ${OVERRIDE_FORMAT}" +fi + +# check whether teleport is running already +# if it is, we exit gracefully with an error +if is_running_teleport; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + TELEPORT_PID=$(get_teleport_pid) + log_header "Warning: Teleport appears to already be running on this host (pid: ${TELEPORT_PID})" + log_cleanup_message + exit 1 + else + log "Ignoring is_running_teleport as requested" + fi +fi + +# check for existing config file +if teleport_config_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: There is already a Teleport config file present at ${TELEPORT_CONFIG_PATH}." 
+ log_cleanup_message + exit 1 + else + log "Ignoring teleport_config_exists as requested" + fi +fi + +# check for existing data directory +if teleport_datadir_exists; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport data directory (${TELEPORT_DATA_DIR})." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_datadir_exists as requested" + fi +fi + +# check for existing binaries +if teleport_binaries_exist; then + if [[ ${IGNORE_CHECKS} != "true" ]]; then + log_header "Warning: Found existing Teleport binaries under ${TELEPORT_BINARY_DIR}." + log_cleanup_message + exit 1 + else + log "Ignoring teleport_binaries_exist as requested" + fi +fi + +install_from_file() { + # select correct URL/installation method based on distro + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-v${TELEPORT_VERSION}-${TELEPORT_BINARY_TYPE}-${TELEPORT_ARCH}-bin.tar.gz" + + # check that needed tools are installed + check_exists_fatal curl tar + # download tarball + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # extract tarball + tar -xzf "${TEMP_DIR}/${DOWNLOAD_FILENAME}" -C "${TEMP_DIR}" + # install binaries to /usr/local/bin + for BINARY in ${TELEPORT_BINARY_LIST}; do + ${COPY_COMMAND} "${TELEPORT_ARCHIVE_PATH}/${BINARY}" "${TELEPORT_BINARY_DIR}/" + done + elif [[ ${TELEPORT_FORMAT} == "deb" ]]; then + # convert teleport arch to deb arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + DEB_ARCH="amd64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + DEB_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + DEB_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + DEB_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}_${TELEPORT_VERSION}_${DEB_ARCH}.deb" + check_deb_not_already_installed + # check that 
needed tools are installed + check_exists_fatal curl dpkg + # download deb and register cleanup operation + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install deb + log "Using dpkg to install ${TEMP_DIR}/${DOWNLOAD_FILENAME}" + dpkg -i "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + elif [[ ${TELEPORT_FORMAT} == "rpm" ]]; then + # convert teleport arch to rpm arch + if [[ ${TELEPORT_ARCH} == "amd64" ]]; then + RPM_ARCH="x86_64" + elif [[ ${TELEPORT_ARCH} == "386" ]]; then + RPM_ARCH="i386" + elif [[ ${TELEPORT_ARCH} == "arm" ]]; then + RPM_ARCH="arm" + elif [[ ${TELEPORT_ARCH} == "arm64" ]]; then + RPM_ARCH="arm64" + fi + URL="https://get.gravitational.com/${TELEPORT_PACKAGE_NAME}-${TELEPORT_VERSION}-1.${RPM_ARCH}.rpm" + check_rpm_not_already_installed + # check for package managers + if check_exists dnf; then + log "Found 'dnf' package manager, using it" + PACKAGE_MANAGER_COMMAND="dnf -y install" + elif check_exists yum; then + log "Found 'yum' package manager, using it" + PACKAGE_MANAGER_COMMAND="yum -y localinstall" + else + PACKAGE_MANAGER_COMMAND="" + log "Cannot find 'yum' or 'dnf' package manager commands, will try installing the rpm manually instead" + fi + # check that needed tools are installed + check_exists_fatal curl + log "Downloading Teleport ${TELEPORT_FORMAT} release ${TELEPORT_VERSION}" + DOWNLOAD_FILENAME=$(get_download_filename "${URL}") + download "${URL}" "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # install with package manager if available + if [[ ${PACKAGE_MANAGER_COMMAND} != "" ]]; then + log "Installing Teleport release from ${TEMP_DIR}/${DOWNLOAD_FILENAME} using ${PACKAGE_MANAGER_COMMAND}" + # install rpm with package manager + ${PACKAGE_MANAGER_COMMAND} "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + # use rpm if we couldn't find a package manager + else + # install RPM (in upgrade mode) + log "Using rpm to install 
${TEMP_DIR}/${DOWNLOAD_FILENAME}" + rpm -Uvh "${TEMP_DIR}/${DOWNLOAD_FILENAME}" + fi + else + log_important "Can't figure out what Teleport format to use" + exit 1 + fi +} + +install_from_repo() { + if [[ "${REPO_CHANNEL}" == "" ]]; then + # By default, use the current version's channel. + REPO_CHANNEL=stable/v"${TELEPORT_VERSION//.*/}" + fi + + # Populate $ID, $VERSION_ID, $VERSION_CODENAME and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + PACKAGE_LIST=$(package_list) + if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ]; then + # old versions of ubuntu require that keys get added by `apt-key add`, without + # adding the key apt shows a key signing error when installing teleport. + if [[ + ($ID == "ubuntu" && $VERSION_ID == "16.04") || \ + ($ID == "debian" && $VERSION_ID == "9" ) + ]]; then + apt install apt-transport-https gnupg -y + curl -fsSL https://apt.releases.teleport.dev/gpg | apt-key add - + echo "deb https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + else + curl -fsSL https://apt.releases.teleport.dev/gpg \ + -o /usr/share/keyrings/teleport-archive-keyring.asc + echo "deb [signed-by=/usr/share/keyrings/teleport-archive-keyring.asc] \ + https://apt.releases.teleport.dev/${ID} ${VERSION_CODENAME} ${REPO_CHANNEL}" > /etc/apt/sources.list.d/teleport.list + fi + apt-get update + apt-get install -y ${PACKAGE_LIST} + elif [ "$ID" = "amzn" ] || [ "$ID" = "rhel" ] || [ "$ID" = "centos" ] ; then + if [ "$ID" = "rhel" ]; then + VERSION_ID="${VERSION_ID//.*/}" # convert version numbers like '7.2' to only include the major version + fi + yum install -y yum-utils + yum-config-manager --add-repo \ + "$(rpm --eval "https://yum.releases.teleport.dev/$ID/$VERSION_ID/Teleport/%{_arch}/${REPO_CHANNEL}/teleport.repo")" + + # Remove metadata cache to prevent cache from other channel (eg, prior version) + # See: https://github.com/gravitational/teleport/issues/22581 + 
yum --disablerepo="*" --enablerepo="teleport" clean metadata + + yum install -y ${PACKAGE_LIST} + else + echo "Unsupported distro: $ID" + exit 1 + fi +} + +# package_list returns the list of packages to install. +# The list of packages can be fed into yum or apt because they already have the expected format when pinning versions. +package_list() { + TELEPORT_PACKAGE_PIN_VERSION=${TELEPORT_PACKAGE_NAME} + TELEPORT_UPDATER_PIN_VERSION="${TELEPORT_PACKAGE_NAME}-updater" + + if [[ "${TELEPORT_FORMAT}" == "deb" ]]; then + TELEPORT_PACKAGE_PIN_VERSION+="=${TELEPORT_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="=${TELEPORT_VERSION}" + + elif [[ "${TELEPORT_FORMAT}" == "rpm" ]]; then + TELEPORT_YUM_VERSION="${TELEPORT_VERSION//-/_}" + TELEPORT_PACKAGE_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + TELEPORT_UPDATER_PIN_VERSION+="-${TELEPORT_YUM_VERSION}" + fi + + PACKAGE_LIST=${TELEPORT_PACKAGE_PIN_VERSION} + # (warning): This expression is constant. Did you forget the $ on a variable? + # Disabling the warning above because expression is templated. + # shellcheck disable=SC2050 + if is_using_systemd && [[ "false" == "true" ]]; then + # Teleport Updater requires systemd. + PACKAGE_LIST+=" ${TELEPORT_UPDATER_PIN_VERSION}" + fi + echo ${PACKAGE_LIST} +} + +is_repo_available() { + if [[ "${OSTYPE}" != "linux-gnu" ]]; then + return 1 + fi + + # Populate $ID, $VERSION_ID and other env vars identifying the OS. + # shellcheck disable=SC1091 + . /etc/os-release + + # The following distros+version have a Teleport repository to install from. + case "${ID}-${VERSION_ID}" in + ubuntu-16.04* | ubuntu-18.04* | ubuntu-20.04* | ubuntu-22.04* | \ + debian-9* | debian-10* | debian-11* | \ + rhel-7* | rhel-8* | rhel-9* | \ + centos-7* | centos-8* | centos-9* | \ + amzn-2 | amzn-2023) + return 0;; + esac + + return 1 +} + +if is_repo_available; then + log "Installing repo for distro $ID." + install_from_repo +else + log "Installing from binary file." 
+ install_from_file +fi + +# check that teleport binary can be found and runs +if ! check_teleport_binary; then + log_important "The Teleport binary could not be found at ${TELEPORT_BINARY_DIR} as expected." + log_important "This usually means that there was an error during installation." + log_important "Check this log for obvious signs of error and contact Teleport support" + log_important "for further assistance." + exit 1 +fi + +# install teleport config +# check the mode and write the appropriate config type +if [[ "${APP_INSTALL_MODE}" == "true" ]]; then + install_teleport_app_config +elif [[ "${DB_INSTALL_MODE}" == "true" ]]; then + install_teleport_database_config +else + install_teleport_node_config +fi + + +# Used to track whether a Teleport agent was installed using this method. +export TELEPORT_INSTALL_METHOD_NODE_SCRIPT="true" + +# install systemd unit if applicable (linux hosts) +if is_using_systemd; then + log "Host is using systemd" + # we only need to manually install the systemd config if teleport was installed via tarball + # all other packages will deploy it automatically + if [[ ${TELEPORT_FORMAT} == "tarball" ]]; then + install_systemd_unit + fi + start_teleport_systemd + print_welcome_message +# install launchd config on MacOS hosts +elif is_macos_host; then + log "Host is running MacOS" + install_launchd_config + start_teleport_launchd + print_welcome_message +# not a MacOS host and no systemd available, print a warning +# and temporarily start Teleport in the foreground +else + log "Host does not appear to be using systemd" + no_systemd_warning + start_teleport_foreground +fi + diff --git a/ansible/teleport_setting/roles/teleport/templates/teleport.yaml.j2 b/ansible/teleport_setting/roles/teleport/templates/teleport.yaml.j2 new file mode 100644 index 0000000..180af28 --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/templates/teleport.yaml.j2 @@ -0,0 +1,35 @@ +version: v3 +teleport: + nodename: {{ ansible_hostname }} + data_dir: 
/var/lib/teleport + join_params: + token_name: {{ get_join_token }} + method: token + proxy_server: {{ teleport_uri }}:443 + log: + output: stderr + severity: INFO + format: + output: text + ca_pin: {{ get_ca_pin }} + diag_addr: "" +auth_service: + enabled: "no" +ssh_service: + enabled: "yes" + labels: + ipaddr: {{ansible_default_ipv4.address}} + group: {{ group_names[-1] }} + os: {{ ansible_distribution }} +{% if custom_labels %} + {{ custom_labels }} +{% endif %} + commands: + - name: hostname + command: [hostname] + period: 1m0s +proxy_service: + enabled: "no" + https_keypairs: [] + https_keypairs_reload_interval: 0s + acme: {} diff --git a/ansible/teleport_setting/roles/teleport/vars/main.yml b/ansible/teleport_setting/roles/teleport/vars/main.yml new file mode 100644 index 0000000..d06c156 --- /dev/null +++ b/ansible/teleport_setting/roles/teleport/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for teleport diff --git a/ansible/teleport_setting/teleport b/ansible/teleport_setting/teleport new file mode 100755 index 0000000..270f7a4 --- /dev/null +++ b/ansible/teleport_setting/teleport @@ -0,0 +1,65 @@ +[prod-demo-master] +10.10.43.100 ansible_port=2222 ansible_user=dev2 + +[prod-demo-worker] +10.10.43.101 ansible_port=2222 ansible_user=dev2 + +[dev-demo-master] +10.10.43.105 ansible_port=2222 ansible_user=dev2 + +[dev-demo-worker] +10.10.43.106 ansible_port=2222 ansible_user=dev2 + +[saas_mgmt_master] +10.10.43.240 ansible_port=2222 ansible_user=dev2 + +[saas_mgmt_node] +10.10.43.[241:243] ansible_port=2222 ansible_user=dev2 + +[dsk_dev_master] +10.10.43.[111:113] ansible_port=2222 ansible_user=dev2 + +[dsk_dev_node] +10.10.43.[114:153] ansible_port=2222 ansible_user=dev2 + +[bastion] +10.10.43.43 ansible_port=2222 ansible_user=havelight + +[agent_host] +10.10.43.177 ansible_port=2222 ansible_user=dev2 +10.10.43.178 ansible_port=2222 ansible_user=dev2 +10.10.43.179 ansible_port=2222 ansible_user=dev2 +10.10.43.180 ansible_port=2222 ansible_user=dev2 
+10.10.43.181 ansible_port=2222 ansible_user=dev2 +10.10.43.182 ansible_port=2222 ansible_user=dev2 + +[agent_cri_master] +10.10.43.185 ansible_port=2222 ansible_user=dev2 + +[agent_cri_worker] +10.10.43.186 ansible_port=2222 ansible_user=dev2 +10.10.43.187 ansible_port=2222 ansible_user=dev2 +10.10.43.188 ansible_port=2222 ansible_user=dev2 + +[agent_middleware_master] +10.10.43.189 ansible_port=2222 ansible_user=dev2 + +[agent_middleware_worker] +10.10.43.190 ansible_port=2222 ansible_user=dev2 +10.10.43.191 ansible_port=2222 ansible_user=dev2 +10.10.43.192 ansible_port=2222 ansible_user=dev2 +10.10.43.193 ansible_port=2222 ansible_user=dev2 +10.10.43.194 ansible_port=2222 ansible_user=dev2 +10.10.43.199 ansible_port=2222 ansible_user=dev2 + +[all:children] +saas_mgmt_master +saas_mgmt_node +dsk_dev_master +dsk_dev_node +bastion +agent_host +agent_cri_master +agent_cri_worker +agent_middleware_master +agent_middleware_worker diff --git a/ansible/teleport_setting/teleport.yml b/ansible/teleport_setting/teleport.yml new file mode 100644 index 0000000..f35bffa --- /dev/null +++ b/ansible/teleport_setting/teleport.yml @@ -0,0 +1,11 @@ +--- +- hosts: all + become: true + roles: + - teleport + vars: + teleport_uri: teleport.kr.datasaker.io + # remove: True + # custom_labels: 'user=havelight,company=exem' + # update: True + install: True diff --git a/ansible/teleport_setting/teleport_aws.yml b/ansible/teleport_setting/teleport_aws.yml new file mode 100644 index 0000000..df225de --- /dev/null +++ b/ansible/teleport_setting/teleport_aws.yml @@ -0,0 +1,12 @@ +--- +#- hosts: prod-demo-master,prod-demo-worker,dev-demo-master,dev-demo-worker +- hosts: all + become: true + roles: + - teleport + vars: + teleport_uri: teleport.kr.datasaker.io + # remove: True + # custom_labels: 'user=havelight,company=exem' + # update: True + install: True diff --git a/ansible/zabbix_agent/ansible.cfg b/ansible/zabbix_agent/ansible.cfg new file mode 100755 index 0000000..0ebf722 --- /dev/null 
+++ b/ansible/zabbix_agent/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +inventory = inventory +roles_path = roles +deprecation_warnings = False +display_skipped_hosts = no +ansible_home = . +stdout_callback = debug +host_key_checking=False +#private_key_file=/root/.ssh/dev2-iac +#remote_tmp = /tmp/.ansible/tmp diff --git a/ansible/zabbix_agent/inventory b/ansible/zabbix_agent/inventory new file mode 100644 index 0000000..67052d2 --- /dev/null +++ b/ansible/zabbix_agent/inventory @@ -0,0 +1,31 @@ +[all] +10.10.43.195 +10.10.43.196 +10.10.43.197 +10.10.43.200 +10.10.43.201 +10.10.43.202 +10.10.43.203 +10.10.43.204 +10.10.43.205 +10.10.43.206 +10.10.43.207 +10.10.43.208 +10.10.43.210 +10.10.43.211 +10.10.43.212 +10.10.43.213 +10.10.43.214 +10.10.43.215 +10.10.43.216 +10.10.43.217 +10.10.43.218 +10.10.43.224 +10.10.43.225 +10.10.43.226 +10.10.43.227 +10.10.43.228 +10.10.43.230 +10.10.43.235 +10.10.43.236 +10.10.43.252 diff --git a/ansible/zabbix_agent/roles/zabbix-agent/defaults/main.yml b/ansible/zabbix_agent/roles/zabbix-agent/defaults/main.yml new file mode 100644 index 0000000..7e57368 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/defaults/main.yml @@ -0,0 +1,292 @@ +--- +# defaults file for zabbix_agent + +zabbix_agent2: false +# zabbix_agent_version: 6.4 +zabbix_agent_version_minor: "*" +zabbix_version_patch: 0 +zabbix_agent_package_remove: false +zabbix_agent_package: zabbix-agent +zabbix_sender_package: zabbix-sender +zabbix_get_package: zabbix-get +zabbix_agent_package_state: present +zabbix_agent_server: +zabbix_agent_serveractive: +zabbix_agent2_server: "{{ zabbix_agent_server }}" +zabbix_agent2_serveractive: "{{ zabbix_agent_serveractive }}" +zabbix_selinux: false +zabbix_agent_apt_priority: +zabbix_agent_conf_mode: "0644" +zabbix_agent_dont_detect_ip: false +zabbix_agent_allow_key: [] +zabbix_agent_deny_key: [] +zabbix_agent2_allow_key: "{{ zabbix_agent_allow_key }}" +zabbix_agent2_deny_key: "{{ zabbix_agent_deny_key }}" + +# Selinux related 
vars +selinux_allow_zabbix_run_sudo: false + +zabbix_agent_install_agent_only: false +zabbix_agent_packages: + - "{{ zabbix_agent_package }}" + - "{{ zabbix_sender_package }}" + - "{{ zabbix_get_package }}" + +# Zabbix role related vars +zabbix_apt_force_apt_get: true +zabbix_apt_install_recommends: false + +# Override Ansible specific facts +zabbix_agent_distribution_major_version: "{{ ansible_distribution_major_version }}" +zabbix_agent_distribution_release: "{{ ansible_distribution_release }}" +zabbix_repo_yum_gpgcheck: 0 +zabbix_repo_yum_schema: https +zabbix_agent_disable_repo: + - epel +zabbix_repo_yum: + - name: zabbix + description: Zabbix Official Repository - $basearch + baseurl: "{{ zabbix_repo_yum_schema }}://repo.zabbix.com/zabbix/{{ zabbix_agent_version }}/rhel/{{ zabbix_agent_distribution_major_version }}/$basearch/" + mode: "0644" + gpgcheck: "{{ zabbix_repo_yum_gpgcheck }}" + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX + state: present + - name: zabbix-non-supported + description: Zabbix Official Repository non-supported - $basearch + baseurl: "{{ zabbix_repo_yum_schema }}://repo.zabbix.com/non-supported/rhel/{{ zabbix_agent_distribution_major_version }}/$basearch/" + mode: "0644" + gpgcheck: "{{ zabbix_repo_yum_gpgcheck }}" + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX + state: present + - name: zabbix-agent2-plugins + description: Zabbix Official Repository (Agent2 Plugins) - $basearch + baseurl: "{{ zabbix_repo_yum_schema }}://repo.zabbix.com/zabbix-agent2-plugins/1/rhel/{{ zabbix_agent_distribution_major_version }}/$basearch/" + mode: "0644" + gpgcheck: "{{ zabbix_repo_yum_gpgcheck }}" + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX + state: present + +zabbix_repo_deb_component: main + +# Zabbix API stuff +zabbix_api_server_host: localhost +# zabbix_api_server_port: 80 +zabbix_api_login_user: Admin +zabbix_api_use_ssl: false +zabbix_api_login_pass: !unsafe zabbix +zabbix_api_validate_certs: false +ansible_httpapi_pass: "{{ 
zabbix_api_login_pass }}" +ansible_httpapi_port: "{{ zabbix_api_server_port }}" +ansible_httpapi_validate_certs: "{{ zabbix_api_validate_certs }}" +zabbix_api_timeout: 30 +zabbix_api_create_hostgroup: false +zabbix_api_create_hosts: false +zabbix_agent_hostgroups_state: present # or absent +zabbix_agent_host_state: present # or absent +zabbix_agent_host_update: true +zabbix_host_status: enabled # or disabled +zabbix_agent_proxy: null +zabbix_agent_inventory_mode: disabled +zabbix_useuip: 1 +zabbix_host_groups: + - Linux servers +zabbix_agent_link_templates: + - Template Linux by Zabbix agent + +zabbix_agent_interfaces: + - type: 1 + main: 1 + useip: "{{ zabbix_useuip }}" + ip: "{{ zabbix_agent_ip }}" + dns: "{{ ansible_fqdn }}" + port: "{{ (zabbix_agent2 == True) | ternary(zabbix_agent2_listenport, zabbix_agent_listenport) }}" + +# Zabbix configuration variables +zabbix_agent_pidfile: /var/run/zabbix/zabbix_agentd.pid +zabbix_agent_logtype: file +zabbix_agent_logfile: /var/log/zabbix/zabbix_agentd.log +zabbix_agent_logfilesize: 100 +zabbix_agent_debuglevel: 3 +zabbix_agent_sourceip: +zabbix_agent_enableremotecommands: 0 +zabbix_agent_allowkeys: +zabbix_agent_denykeys: +zabbix_agent_logremotecommands: 0 +zabbix_agent_listenport: 10050 +zabbix_agent_jmx_listenport: +zabbix_agent_listeninterface: +zabbix_agent_listenip: +zabbix_agent_startagents: 3 +zabbix_agent_hostname: "{{ inventory_hostname }}" +zabbix_agent_hostnameitem: +zabbix_agent_hostmetadata: +zabbix_agent_hostmetadataitem: +zabbix_agent_refreshactivechecks: 120 +zabbix_agent_buffersend: 5 +zabbix_agent_buffersize: 100 +zabbix_agent_maxlinespersecond: 100 +zabbix_agent_allowroot: 0 +zabbix_agent_zabbix_alias: +zabbix_agent_timeout: 3 +zabbix_agent_include: /etc/zabbix/zabbix_agentd.d +zabbix_agent_include_pattern: +zabbix_agent_include_mode: "0750" +zabbix_agent_unsafeuserparameters: 0 +zabbix_agent_userparameters: [] +zabbix_agent_userparameters_templates_src: "userparameters" 
+zabbix_agent_userparameters_scripts_src: "scripts" +zabbix_agent_custom_scripts: false +zabbix_agent_loadmodulepath: ${libdir}/modules +zabbix_agent_loadmodule: +zabbix_agent_become_on_localhost: true +zabbix_agent_description: +zabbix_agent_inventory_zabbix: {} +zabbix_agent_heartbeatfrequency: 60 +zabbix_agent_macros: [] +zabbix_agent_tags: [] +zabbix_agent_chassis: false + +# TLS settings +zabbix_agent_tlsconnect: +zabbix_agent_tlsaccept: +zabbix_agent_tlscafile: +zabbix_agent_tlscrlfile: +zabbix_agent_tlsservercertissuer: +zabbix_agent_tlsservercertsubject: +zabbix_agent_tls_subject: "{{ zabbix_agent_tlsservercertsubject }}" # FIXME this is not correct and should be removed with 2.0.0, here only to prevent regression +zabbix_agent_tlscertfile: +zabbix_agent_tlskeyfile: +zabbix_agent_tlspskidentity: +zabbix_agent_tlspsk_auto: false + +zabbix_agent_tls_config: + unencrypted: "1" + psk: "2" + cert: "4" + +# IPMI settings +zabbix_agent_ipmi_authtype: 2 +zabbix_agent_ipmi_password: +zabbix_agent_ipmi_privilege: 2 +zabbix_agent_ipmi_username: + +# Zabbix Agent2 +zabbix_agent2_pidfile: /var/run/zabbix/zabbix_agent2.pid +zabbix_agent2_logfile: /var/log/zabbix/zabbix_agent2.log +zabbix_agent2_logtype: file +zabbix_agent2_statusport: 9999 +zabbix_agent2_include: /etc/zabbix/zabbix_agent2.d +zabbix_agent2_include_pattern: +zabbix_agent2_logfilesize: 100 +zabbix_agent2_debuglevel: 3 +zabbix_agent2_sourceip: +zabbix_agent2_listenport: 10050 +zabbix_agent2_listenip: +zabbix_agent2_hostname: "{{ inventory_hostname }}" +zabbix_agent2_hostnameitem: +zabbix_agent2_hostmetadata: +zabbix_agent2_hostmetadataitem: +zabbix_agent2_hostinterface: +zabbix_agent2_hostinterfaceitem: +zabbix_agent2_enablepersistentbuffer: 0 +zabbix_agent2_persistentbufferperiod: 1h +zabbix_agent2_persistentbufferfile: +zabbix_agent2_refreshactivechecks: 120 +zabbix_agent2_buffersend: 5 +zabbix_agent2_buffersize: 100 +zabbix_agent2_zabbix_alias: +zabbix_agent2_timeout: 3 +zabbix_agent2_include_mode: "0750" 
+zabbix_agent2_unsafeuserparameters: 0 +zabbix_agent2_controlsocket: /tmp/agent.sock +zabbix_agent2_plugins: [] + +# Zabbix Agent2 TLS settings +zabbix_agent2_tlsconnect: +zabbix_agent2_tlsaccept: +zabbix_agent2_tlscafile: +zabbix_agent2_tlscrlfile: +zabbix_agent2_tlsservercertissuer: +zabbix_agent2_tlsservercertsubject: +zabbix_agent2_tls_subject: "{{ zabbix_agent2_tlsservercertsubject }}" # FIXME this is not correct and should be removed with 2.0.0, here only to prevent regression +zabbix_agent2_tlscertfile: +zabbix_agent2_tlskeyfile: +zabbix_agent2_tlspskidentity: +zabbix_agent2_tlspsk_auto: false + +# Windows/macOS Related +zabbix_version_long: 5.2.4 + +# Windows Related +zabbix_win_package: zabbix_agent-{{ zabbix_version_long }}-windows-amd64-openssl.zip +zabbix2_win_package: zabbix_agent2-{{ zabbix_version_long }}-windows-amd64-openssl-static.zip +zabbix_win_download_url: https://cdn.zabbix.com/zabbix/binaries/stable +zabbix_win_download_link: "{{ zabbix_win_download_url }}/{{ zabbix_version_long | regex_search('^\\d+\\.\\d+') }}/{{ zabbix_version_long }}/{{ zabbix_win_package }}" +zabbix2_win_download_link: "{{ zabbix_win_download_url }}/{{ zabbix_version_long | regex_search('^\\d+\\.\\d+') }}/{{ zabbix_version_long }}/{{ zabbix2_win_package }}" +zabbix_win_install_dir: 'C:\Zabbix' +zabbix_win_install_dir_conf: '{{ zabbix_win_install_dir }}\\conf' +zabbix_win_install_dir_bin: '{{ zabbix_win_install_dir }}\\bin' +zabbix_agent_win_logfile: "{{ zabbix_win_install_dir }}\\zabbix_agentd.log" +zabbix_agent_win_include: "{{ zabbix_win_install_dir }}\\zabbix_agent.d\\" +zabbix_agent2_win_logfile: "{{ zabbix_win_install_dir }}\\zabbix_agent2.log" +zabbix_agent_win_svc_recovery: true +zabbix_win_firewall_management: true + +# macOS Related +zabbix_mac_package: zabbix_agent-{{ zabbix_version_long }}-macos-amd64-openssl.pkg +zabbix_mac_download_url: https://cdn.zabbix.com/zabbix/binaries/stable +zabbix_mac_download_link: "{{ zabbix_mac_download_url }}/{{ 
zabbix_agent_version }}/{{ zabbix_version_long }}/{{ zabbix_mac_package }}" + +# Zabbix Agent Docker facts +zabbix_agent_docker: false +zabbix_agent_docker_state: started +zabbix_agent_docker_name: zabbix-agent +zabbix_agent_docker_image: "zabbix/zabbix-agent" +zabbix_agent_docker_image_tag: "ubuntu-{{ zabbix_agent_version }}.{{ zabbix_version_patch }}" +zabbix_agent_docker_user_gid: 101 +zabbix_agent_docker_user_uid: 101 +zabbix_agent_docker_network_mode: host +zabbix_agent_docker_restart_policy: unless-stopped +zabbix_agent_docker_privileged: false +zabbix_agent_docker_ports: + - 10050:10050 +zabbix_agent_docker_security_opts: + - apparmor:unconfined +zabbix_agent_docker_volumes: + - /etc/zabbix/zabbix_agentd.d:{{ zabbix_agent_include }} + - /:/hostfs:ro + - /etc:/hostfs/etc:ro + - /proc:/hostfs/proc:ro + - /sys:/hostfs/sys:ro + - /var/run:/var/run +zabbix_agent_docker_env: + ZBX_HOSTNAME: "{{ zabbix_agent_hostname }}" + ZBX_SERVER_HOST: "{{ zabbix_agent_server }}" + ZBX_PASSIVE_ALLOW: "{{ zabbix_agent_serverpassive_allow | default(omit) }}" + ZBX_PASSIVESERVERS: "{{ zabbix_agent_serverpassive | default(omit) }}" + ZBX_ACTIVE_ALLOW: "{{ zabbix_agent_serveractive_allow | default(omit) }}" + ZBX_LOADMODULE: "{{ zabbix_agent_loadmodule | default(omit) }}" + ZBX_DEBUGLEVEL: "{{ zabbix_agent_debuglevel }}" + ZBX_TIMEOUT: "{{ zabbix_agent_timeout }}" + ZBX_SOURCEIP: "{{ zabbix_agent_sourceip | default(omit) }}" + ZBX_ENABLEREMOTECOMMANDS: "{{ zabbix_agent_enableremotecommands | default(omit) }}" + ZBX_LOGREMOTECOMMANDS: "{{ zabbix_agent_logremotecommands | default(omit) }}" + ZBX_STARTAGENTS: "{{ zabbix_agent_startagents | default(omit) }}" + ZBX_HOSTNAMEITEM: "{{ zabbix_agent_hostnameitem | default(omit) }}" + ZBX_METADATA: "{{ zabbix_agent_hostmetadata | default(omit) }}" + ZBX_METADATAITEM: "{{ zabbix_agent_hostmetadataitem | default(omit) }}" + ZBX_REFRESHACTIVECHECKS: "{{ zabbix_agent_refreshactivechecks | default(omit) }}" + ZBX_BUFFERSEND: "{{ 
zabbix_agent_buffersend | default(omit) }}" + ZBX_BUFFERSIZE: "{{ zabbix_agent_buffersize | default(omit) }}" + ZBX_MAXLINESPERSECOND: "{{ zabbix_agent_maxlinespersecond | default(omit) }}" + ZBX_LISTENIP: "{{ zabbix_agent_listenip }}" + ZBX_UNSAFEUSERPARAMETERS: "{{ zabbix_agent_unsafeuserparameters | default(omit) }}" + ZBX_TLSCONNECT: "{{ zabbix_agent_tlsconnect | default(omit) }}" + ZBX_TLSACCEPT: "{{ zabbix_agent_tlsaccept | default(omit) }}" + ZBX_TLSCAFILE: "{{ zabbix_agent_tlscafile | default(omit) }}" + ZBX_TLSCRLFILE: "{{ zabbix_agent_tlscrlfile | default(omit) }}" + ZBX_TLSSERVERCERTISSUER: "{{ zabbix_agent_tlsservercertissuer | default(omit) }}" + ZBX_TLSSERVERCERTSUBJECT: "{{ zabbix_agent_tlsservercertsubject | default(omit) }}" + ZBX_TLSCERTFILE: "{{ zabbix_agent_tlscertfile | default(omit) }}" + ZBX_TLSKEYFILE: "{{ zabbix_agent_tlskeyfile | default(omit) }}" + ZBX_TLSPSKIDENTITY: "{{ zabbix_agent_tlspskidentity | default(omit) }}" diff --git a/ansible/zabbix_agent/roles/zabbix-agent/files/sample.conf b/ansible/zabbix_agent/roles/zabbix-agent/files/sample.conf new file mode 100644 index 0000000..70df285 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/files/sample.conf @@ -0,0 +1,3 @@ +# This is an sample userparameters file. 
+ +UserParameter=mysql.ping_to,mysqladmin -uroot ping | grep -c alive diff --git a/ansible/zabbix_agent/roles/zabbix-agent/files/win_sample/doSomething.ps1 b/ansible/zabbix_agent/roles/zabbix-agent/files/win_sample/doSomething.ps1 new file mode 100644 index 0000000..e69de29 diff --git a/ansible/zabbix_agent/roles/zabbix-agent/handlers/main.yml b/ansible/zabbix_agent/roles/zabbix-agent/handlers/main.yml new file mode 100644 index 0000000..46fa0a8 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/handlers/main.yml @@ -0,0 +1,40 @@ +--- +# handlers file for zabbix-agent + +- name: restart zabbix-agent + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + state: restarted + enabled: true + become: true + when: + - not zabbix_agent_docker + - ansible_os_family != "Windows" and ansible_os_family != "Darwin" + +- name: firewalld-reload + ansible.builtin.command: "firewall-cmd --reload" + become: true + when: + - ansible_facts.services["firewalld"] is defined + - ansible_facts.services["firewalld"].state == "running" + +- name: restart win zabbix agent + win_service: + name: "{{ zabbix_win_svc_name }}" + state: restarted + when: + - ansible_os_family == "Windows" + +- name: restart mac zabbix agent + ansible.builtin.command: "launchctl kickstart -k system/{{ zabbix_agent_service }}" + become: true + when: + - not zabbix_agent_docker + - ansible_os_family == "Darwin" + +- name: "clean repo files from proxy creds" + ansible.builtin.shell: ls /etc/yum.repos.d/zabbix* && sed -i 's/^proxy =.*//' /etc/yum.repos.d/zabbix* || true + become: true + when: + - ansible_os_family == 'RedHat' + - zabbix_http_proxy is defined or zabbix_https_proxy is defined diff --git a/ansible/zabbix_agent/roles/zabbix-agent/meta/main.yml b/ansible/zabbix_agent/roles/zabbix-agent/meta/main.yml new file mode 100644 index 0000000..22803e0 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/meta/main.yml @@ -0,0 +1,42 @@ +--- +galaxy_info: + author: Werner Dijkerman + 
description: Installing and maintaining zabbix-agent for RedHat/Debian/Ubuntu/Windows/Suse. + company: myCompany.Dotcom + license: MIT + min_ansible_version: 2.7 + platforms: + - name: EL + versions: + - 5 + - 6 + - 7 + - name: Ubuntu + versions: + - lucid + - precise + - trusty + - xenial + - bionic + - name: Debian + versions: + - squeeze + - wheezy + - jessie + - stretch + - buster + - name: opensuse + versions: + - 12.1 + - 12.2 + - 12.3 + - 13.1 + - 13.2 + - name: Windows + versions: + - all + + galaxy_tags: + - zabbix + - monitoring +dependencies: [] diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 new file mode 100644 index 0000000..1df5be7 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/Dockerfile.j2 @@ -0,0 +1,14 @@ +# Molecule managed + +{% if item.registry is defined %} +FROM {{ item.registry.url }}/{{ item.image }} +{% else %} +FROM {{ item.image }} +{% endif %} + +RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ + elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \ + elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ + elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ + elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ + elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/INSTALL.rst 
b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/INSTALL.rst new file mode 100644 index 0000000..3c2ae97 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/INSTALL.rst @@ -0,0 +1,26 @@ +******************************** +Docker driver installation guide +******************************** + +Requirements +============ + +* General molecule dependencies (see https://molecule.readthedocs.io/en/latest/installation.html) +* Docker Engine +* docker-py +* docker + +Install +======= + +Ansible < 2.6 + +.. code-block:: bash + + $ sudo pip install docker-py + +Ansible >= 2.6 + +.. code-block:: bash + + $ sudo pip install docker diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/molecule.yml b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/molecule.yml new file mode 100644 index 0000000..7795860 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/molecule.yml @@ -0,0 +1,73 @@ +--- +dependency: + name: galaxy +driver: + name: docker +platforms: + - name: zabbix-server-centos + image: milcom/centos7-systemd:latest + groups: + - zabbix_server + - mysql + privileged: true + networks: + - name: zabbix + published_ports: + - "80:80" + - name: zabbix-agent-centos + image: milcom/centos7-systemd:latest + groups: + - zabbix_agent + privileged: true + networks: + - name: zabbix + - name: zabbix-agent-debian + image: minimum2scp/systemd-stretch:latest + command: /sbin/init + groups: + - zabbix_agent + privileged: true + networks: + - name: zabbix + - name: zabbix-agent-ubuntu + image: solita/ubuntu-systemd:bionic + groups: + - zabbix_agent + privileged: true + networks: + - name: zabbix + +provisioner: + name: ansible + playbooks: + docker: + create: ../default/create.yml + destroy: ../default/destroy.yml + inventory: + group_vars: + all: + zabbix_api_create_hosts: true + zabbix_api_create_hostgroup: true + zabbix_api_server_url: http://zabbix-server-centos + 
zabbix_apache_servername: zabbix-server-centos + mysql: + zabbix_server_database: mysql + zabbix_server_database_long: mysql + zabbix_server_dbport: 3306 + database_type: mysql + database_type_long: mysql + host_vars: + zabbix-agent-fedora: + ansible_python_interpreter: /usr/bin/python3 + zabbix-agent-ubuntu: + zabbix_agent_tlsaccept: psk + zabbix_agent_tlsconnect: psk + zabbix_agent_tlspskidentity: "myhost PSK" + zabbix_agent_tlspsk_secret: b7e3d380b9d400676d47198ecf3592ccd4795a59668aa2ade29f0003abbbd40d + zabbix_agent_tlspskfile: /etc/zabbix/zabbix_agent_pskfile.psk + +scenario: + name: with-server + +verifier: + name: testinfra diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/playbook.yml b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/playbook.yml new file mode 100644 index 0000000..21c3ea0 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/playbook.yml @@ -0,0 +1,24 @@ +--- +- name: Converge + hosts: all:!zabbix_server + pre_tasks: + - name: "Get IP Server" + ansible.builtin.shell: grep $(hostname) /etc/hosts | awk '{ print $1 }' | tail -n 1 + register: ip_address + delegate_to: zabbix-server-centos + changed_when: false + tags: + - skip_ansible_lint + + - name: "Get IP hosts" + ansible.builtin.shell: grep $(hostname) /etc/hosts | awk '{ print $1 }' | tail -n 1 + register: ip_address_host + changed_when: false + tags: + - skip_ansible_lint + + roles: + - role: zabbix_agent + zabbix_agent_ip: "{{ ip_address_host.stdout }}" + zabbix_agent_server: "{{ ip_address.stdout }}" + zabbix_agent_serveractive: "{{ ip_address.stdout }}" diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/prepare.yml b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/prepare.yml new file mode 100644 index 0000000..a08d0fa --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/prepare.yml @@ -0,0 +1,114 @@ +--- +- name: Prepare + hosts: zabbix_server + pre_tasks: 
+ - name: "Installing EPEL" + ansible.builtin.yum: + name: + - epel-release + state: present + when: ansible_distribution == 'CentOS' + + - name: "Installing packages" + ansible.builtin.yum: + name: + - net-tools + - which + - libselinux-python + - python-pip + state: present + register: installation_dependencies + when: ansible_distribution == 'CentOS' + + - name: "Installing which on NON-CentOS" + ansible.builtin.apt: + name: + - net-tools + - python-pip + - curl + state: present + when: ansible_distribution != 'CentOS' + + - name: "Configure SUDO." + ansible.builtin.lineinfile: + dest: /etc/sudoers + line: "Defaults !requiretty" + state: present + + - name: "Make sure the docs are installed." + ansible.builtin.lineinfile: + dest: /etc/yum.conf + line: "tsflags=nodocs" + state: absent + + - name: "Installing some python dependencies" + ansible.builtin.pip: + name: py-zabbix + state: present + + roles: + - role: geerlingguy.mysql + - role: zabbix_server + - role: zabbix_web + +- name: Prepare + hosts: all:!zabbix_server:!docker + tasks: + - name: "Installing packages on CentOS family" + ansible.builtin.yum: + name: + - net-tools + - which + state: present + when: + - ansible_os_family == 'RedHat' + + - name: "Installing packages on Debian family" + ansible.builtin.apt: + name: + - net-tools + state: present + when: + - ansible_os_family == 'Debian' + +- name: Converge + hosts: docker + tasks: + - name: "Download Docker CE repo file" + ansible.builtin.get_url: + url: https://download.docker.com/linux/centos/docker-ce.repo + dest: /etc/yum.repos.d/docker-ce.repo + mode: 0644 + register: zabbix_agent_prepare_docker_repo + until: zabbix_agent_prepare_docker_repo is succeeded + + - name: "Installing Epel" + ansible.builtin.package: + pkg: + - epel-release + state: present + register: zabbix_agent_prepare_docker_install + until: zabbix_agent_prepare_docker_install is succeeded + + - name: "Installing Docker" + ansible.builtin.package: + pkg: + - docker-ce + - python-pip 
+ - python-setuptools + state: present + register: zabbix_agent_prepare_docker_install + until: zabbix_agent_prepare_docker_install is succeeded + + - name: "Installing Docker Python" + ansible.builtin.pip: + name: + - docker + state: present + register: zabbix_agent_prepare_docker_install + until: zabbix_agent_prepare_docker_install is succeeded + + - name: "Starting Docker service" + ansible.builtin.service: + name: docker + state: started diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/requirements.yml b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/requirements.yml new file mode 100644 index 0000000..793f925 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/requirements.yml @@ -0,0 +1,5 @@ +--- +- src: geerlingguy.apache +- src: geerlingguy.mysql +- src: dj-wasabi.zabbix-server +- src: dj-wasabi.zabbix-web diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_agent.py b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_agent.py new file mode 100644 index 0000000..b6fbb22 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_agent.py @@ -0,0 +1,44 @@ +import os +from zabbix_api import ZabbixAPI + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('zabbix_agent') + + +def authenticate(): + zapi = ZabbixAPI(server='http://zabbix-server-centos/api_jsonrpc.php') + zapi.login("Admin", "zabbix") + return zapi + + +def test_psk_host(host): + zapi = authenticate() + hostname = host.check_output('hostname -s') + host_name = "zabbix-agent-ubuntu" + + server_data = zapi.host.get({'output': 'extend', 'selectInventory': 'extend', 'filter': {'host': [hostname]}}) + + if hostname == host_name: + assert server_data[0]['tls_psk'] == "b7e3d380b9d400676d47198ecf3592ccd4795a59668aa2ade29f0003abbbd40d" + assert 
server_data[0]['tls_psk_identity'] == "myhost PSK" + assert server_data[0]['tls_accept'] == "2" + else: + assert server_data[0]['tls_psk'] == "" + assert server_data[0]['tls_psk_identity'] == "" + assert server_data[0]['tls_accept'] == "1" + + +def test_zabbix_agent_psk(host): + hostname = host.check_output('hostname -s') + host_name = "zabbix-agent-ubuntu" + + psk_file = host.file("/etc/zabbix/zabbix_agent_pskfile.psk") + if hostname == host_name: + assert psk_file.user == "zabbix" + assert psk_file.group == "zabbix" + assert psk_file.mode == 0o400 + assert psk_file.contains("b7e3d380b9d400676d47198ecf3592ccd4795a59668aa2ade29f0003abbbd40d") + else: + assert not psk_file.exists diff --git a/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_default.py b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_default.py new file mode 100644 index 0000000..f81cca3 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/molecule/with-server/tests/test_default.py @@ -0,0 +1,41 @@ +import os +from zabbix_api import ZabbixAPI + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('zabbix_server') + + +def authenticate(): + zapi = ZabbixAPI(server='http://zabbix-server-centos/api_jsonrpc.php') + zapi.login("Admin", "zabbix") + return zapi + + +def get_hosts(): + return [ + "zabbix-agent-debian", + "zabbix-agent-ubuntu", + "zabbix-agent-centos", + "zabbix-agent-docker-centos" + ] + + +def test_hosts(): + zapi = authenticate() + hosts = get_hosts() + servers = zapi.host.get({'output': ["hostid", "name"]}) + + for server in servers: + if server['name'] != 'Zabbix server': + assert server['name'] in hosts + + +def test_hosts_status(): + zapi = authenticate() + servers = zapi.host.get({'output': ["status", "name"]}) + + for server in servers: + if server['name'] != 'Zabbix server': + assert int(server['status']) == 0 diff --git 
a/ansible/zabbix_agent/roles/zabbix-agent/tasks/Debian.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Debian.yml new file mode 100644 index 0000000..3a56b8e --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Debian.yml @@ -0,0 +1,151 @@ +--- +# Tasks specific for Debian/Ubuntu Systems + +- name: "Debian | Set some variables" + ansible.builtin.set_fact: + zabbix_short_version: "{{ zabbix_agent_version | regex_replace('\\.', '') }}" + zabbix_underscore_version: "{{ zabbix_agent_version | regex_replace('\\.', '_') }}" + tags: + - always + +- name: "Debian | Repo URL" + ansible.builtin.set_fact: + zabbix_repo_deb_url: "{{ _zabbix_repo_deb_url }}{{ '-arm64' if ansible_machine == 'aarch64' else ''}}" + when: + - zabbix_repo_deb_url is undefined + tags: + - always + +- name: "Debian | Installing gnupg" + ansible.builtin.apt: + pkg: gnupg + update_cache: true + cache_valid_time: 3600 + force: true + state: present + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: gnupg_installed + until: gnupg_installed is succeeded + become: true + tags: + - install + +# In releases older than Debian 12 and Ubuntu 22.04, /etc/apt/keyrings does not exist by default. +# It SHOULD be created with permissions 0755 if it is needed and does not already exist. 
+# See: https://wiki.debian.org/DebianRepository/UseThirdParty +- name: "Debian | Create /etc/apt/keyrings/ on older versions" + ansible.builtin.file: + path: /etc/apt/keyrings/ + state: directory + mode: "0755" + become: true + when: + - (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "22") or + (ansible_distribution == "Debian" and ansible_distribution_major_version < "12") + +- name: "Debian | Download gpg key" + ansible.builtin.get_url: + url: http://repo.zabbix.com/zabbix-official-repo.key + dest: "{{ zabbix_gpg_key }}" + mode: "0644" + force: true + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + become: true + tags: + - install + +- name: "Debian | Installing repository {{ ansible_distribution }}" + ansible.builtin.copy: + dest: /etc/apt/sources.list.d/zabbix.sources + owner: root + group: root + mode: 0644 + content: | + Types: deb deb-src + Enabled: yes + URIs: {{ zabbix_repo_deb_url }} + Suites: {{ ansible_distribution_release }} + Components: {{ zabbix_repo_deb_component }} + Architectures: {{ 'amd64' if ansible_machine != 'aarch64' else 'arm64'}} + Signed-By: {{ zabbix_gpg_key }} + become: true + tags: + - install + +- name: "Debian | Create /etc/apt/preferences.d/" + ansible.builtin.file: + path: /etc/apt/preferences.d/ + state: directory + mode: "0755" + when: + - zabbix_agent_apt_priority | int + become: true + tags: + - install + +- name: "Debian | Configuring the weight for APT" + ansible.builtin.copy: + dest: "/etc/apt/preferences.d/zabbix-agent-{{ zabbix_underscore_version }}" + content: | + Package: {{ zabbix_agent_package }} + Pin: origin repo.zabbix.com + Pin-Priority: {{ zabbix_agent_apt_priority | int }} + owner: root + mode: "0644" + when: + - zabbix_agent_apt_priority | int + become: true + tags: + - install + +- name: "Debian | Installing zabbix-agent" + ansible.builtin.apt: + pkg: "{{ 
zabbix_agent_package }}" + state: "{{ zabbix_agent_package_state }}" + update_cache: true + cache_valid_time: 0 + force_apt_get: "{{ zabbix_apt_force_apt_get }}" + install_recommends: "{{ zabbix_apt_install_recommends }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + tags: + - install + +- name: "Debian | Installing zabbix-{sender,get}" + ansible.builtin.apt: + pkg: + - "{{ zabbix_sender_package }}" + - "{{ zabbix_get_package }}" + state: "{{ zabbix_agent_package_state }}" + update_cache: true + cache_valid_time: 0 + force_apt_get: "{{ zabbix_apt_force_apt_get }}" + install_recommends: "{{ zabbix_apt_install_recommends }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + when: + - not zabbix_agent_install_agent_only + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + check_mode: false + tags: + - install + +- name: "Debian | Enable the service" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + enabled: true + use: service + become: true + tags: + - service diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/Docker.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Docker.yml new file mode 100644 index 0000000..90656fc --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Docker.yml @@ -0,0 +1,32 @@ +--- +- name: "Create volume mount string" + ansible.builtin.set_fact: + volume_mount: "{{ zabbix_agent_tlspskfile }}:/var/lib/zabbix/enc/tlspskfile" + tls_key: + ZBX_TLSPSKFILE: tlspskfile + when: + - zabbix_agent_tlspskfile is defined + +- name: "Add zabbix_agent_tlspskfile to volume mount" + ansible.builtin.set_fact: + 
zabbix_agent_docker_volumes: "{{ zabbix_agent_docker_volumes + [ volume_mount ] }}" + zabbix_agent_docker_env: "{{ zabbix_agent_docker_env | combine(tls_key) }}" + when: + - zabbix_agent_tlspskfile is defined + +- name: "Ensure Zabbix Docker container is running" + community.docker.docker_container: + name: "{{ zabbix_agent_docker_name }}" + image: "{{ zabbix_agent_docker_image }}:{{ zabbix_agent_docker_image_tag }}" + state: "{{ zabbix_agent_docker_state }}" + restart_policy: "{{ zabbix_agent_docker_restart_policy }}" + network_mode: "{{ zabbix_agent_docker_network_mode }}" + published_ports: "{{ zabbix_agent_docker_ports }}" + privileged: "{{ zabbix_agent_docker_privileged }}" + security_opts: "{{ zabbix_agent_docker_security_opts }}" + volumes: "{{ zabbix_agent_docker_volumes }}" + env: "{{ zabbix_agent_docker_env }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + become: true diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/Linux.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Linux.yml new file mode 100644 index 0000000..de06858 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Linux.yml @@ -0,0 +1,239 @@ +--- +- name: "Set default ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4'].address }}" + when: + - zabbix_agent_ip is not defined + - "'ansible_default_ipv4' in hostvars[inventory_hostname]" + tags: + - config + +- name: "Get Total Private IP Addresses" + ansible.builtin.set_fact: + total_private_ip_addresses: "{{ ansible_all_ipv4_addresses | ansible.utils.ipaddr('private') | length }}" + when: + - ansible_all_ipv4_addresses is defined + - not (zabbix_agent_dont_detect_ip) + tags: + - config + +- name: "Set first public ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ ansible_all_ipv4_addresses | 
ansible.netcommon.ipaddr('public') | first }}" + zabbix_agent_server: "{{ zabbix_agent_server_public_ip | default(zabbix_agent_server) }}" + zabbix_agent_serveractive: "{{ zabbix_agent_serveractive_public_ip | default(zabbix_agent_serveractive) }}" + zabbix_agent2_server: "{{ zabbix_agent_server_public_ip | default(zabbix_agent2_server) }}" + zabbix_agent2_serveractive: "{{ zabbix_agent_serveractive_public_ip | default(zabbix_agent2_serveractive) }}" + when: + - zabbix_agent_ip is not defined + - total_private_ip_addresses is defined + - total_private_ip_addresses == '0' + tags: + - config + +- name: "Set first private ip address for zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ ansible_all_ipv4_addresses | ansible.netcommon.ipaddr('private') | first }}" + when: + - zabbix_agent_ip is not defined + - total_private_ip_addresses is defined + - total_private_ip_addresses != '0' + tags: + - config + +- name: "Fail invalid specified agent_listeninterface" + ansible.builtin.fail: + msg: "The specified network interface does not exist" + when: + - (zabbix_agent_listeninterface) + - (zabbix_agent_listeninterface not in ansible_interfaces) + tags: + - config + +- name: "Set network interface" + ansible.builtin.set_fact: + network_interface: ansible_{{ zabbix_agent_listeninterface }} + when: + - (zabbix_agent_listeninterface) + - not zabbix_agent_listenip + tags: + - config + +- name: "Get IP of agent_listeninterface when no agent_listenip specified" + ansible.builtin.set_fact: + zabbix_agent_listenip: "{{ hostvars[inventory_hostname][network_interface]['ipv4'].address | default('0.0.0.0') }}" + when: + - (zabbix_agent_listeninterface) + - not zabbix_agent_listenip + tags: + - config + - api + +- name: "Default agent_listenip to all when not specified" + ansible.builtin.set_fact: + zabbix_agent_listenip: "0.0.0.0" + when: + - not (zabbix_agent_listenip) + tags: + - config + +- name: "Fail invalid specified agent_listenip" + ansible.builtin.fail: + msg: 
"The agent_listenip does not exist" + when: + - zabbix_agent_listenip != '0.0.0.0' + - zabbix_agent_listenip != '127.0.0.1' + - (zabbix_agent_listenip not in ansible_all_ipv4_addresses) + tags: + - config + +- name: "Configure SELinux when enabled" + ansible.builtin.include_tasks: selinux.yml + when: + - zabbix_selinux | bool + +- name: "Adding zabbix group" + ansible.builtin.group: + name: zabbix + state: present + gid: "{{ zabbix_agent_docker_user_gid | default(omit) }}" + become: true + when: + - zabbix_agent_docker | bool + tags: + - config + +- name: "Adding zabbix user" + ansible.builtin.user: + name: zabbix + group: zabbix + state: present + create_home: false + home: /etc/zabbix + uid: "{{ zabbix_agent_docker_user_uid | default(omit) }}" + system: true + become: true + when: + - zabbix_agent_docker | bool + tags: + - config + +- name: "Configure zabbix-agent" + ansible.builtin.template: + src: "{{ 'zabbix_agentd.conf.j2' if not zabbix_agent2 else 'zabbix_agent2.conf.j2' }}" + dest: "/etc/zabbix/{{ zabbix_agent_conf if not zabbix_agent2 else zabbix_agent2_conf }}" + owner: root + group: root + mode: "{{ zabbix_agent_conf_mode }}" + notify: + - restart zabbix-agent + become: true + when: + - not (zabbix_agent_docker | bool) + tags: + - config + +- name: "Create directory for PSK file if not exist." 
+ ansible.builtin.file: + path: "{{ zabbix_agent_tlspskfile | dirname }}" + mode: 0755 + state: directory + become: true + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - not (zabbix_agent2 | bool) + tags: + - config + +- name: "Create directory for PSK file if not exist (zabbix-agent2)" + ansible.builtin.file: + path: "{{ zabbix_agent2_tlspskfile | dirname }}" + mode: 0755 + state: directory + become: true + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - zabbix_agent2 | bool + tags: + - config + +- name: "Place TLS PSK File" + ansible.builtin.copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - zabbix_agent_tlspsk_secret is defined + - not (zabbix_agent2 | bool) + notify: + - restart zabbix-agent + tags: + - config + +- name: "Place TLS PSK File (zabbix-agent2)" + ansible.builtin.copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile # https://github.com/ansible-collections/community.zabbix/issues/680 + - zabbix_agent2_tlspsk_secret is defined + - zabbix_agent2 | bool + notify: + - restart zabbix-agent + tags: + - config + +- name: "Create include dir zabbix-agent" + ansible.builtin.file: + path: "{{ zabbix_agent_include if not zabbix_agent2 else zabbix_agent2_include }}" + owner: root + group: zabbix + mode: "{{ zabbix_agent_include_mode if not zabbix_agent2 else zabbix_agent2_include_mode }}" + state: directory + become: true + tags: + - config + +- 
name: "Install the Docker container" + ansible.builtin.include_tasks: Docker.yml + when: + - zabbix_agent_docker | bool + +- name: "Remove zabbix-agent installation when zabbix-agent2 is used." + ansible.builtin.include_tasks: remove.yml + when: + - zabbix_agent2 | bool + - zabbix_agent_package_remove + +- name: "Make sure the zabbix-agent service is running" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + state: started + enabled: true + become: true + when: + - not (zabbix_agent_docker | bool) + tags: + - service + +- name: "Give zabbix-agent access to system.hw.chassis info" + ansible.builtin.file: + path: /sys/firmware/dmi/tables/DMI + owner: root + group: zabbix + become: true + when: zabbix_agent_chassis | bool + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/RedHat.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/RedHat.yml new file mode 100644 index 0000000..c85d8e5 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/RedHat.yml @@ -0,0 +1,70 @@ +--- +# Tasks specific for RedHat systems + +- name: "RedHat | Install basic repo file" + ansible.builtin.yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + gpgcheck: "{{ item.gpgcheck }}" + gpgkey: "{{ item.gpgkey }}" + mode: "{{ item.mode | default('0644') }}" + priority: "{{ item.priority | default('99') }}" + state: "{{ item.state | default('present') }}" + proxy: "{{ zabbix_http_proxy | default(omit) }}" + with_items: "{{ zabbix_repo_yum }}" + register: yum_repo_installed + become: true + notify: + - "clean repo files from proxy creds" + tags: + - install + +- name: Check if warn parameter can be used for shell module + ansible.builtin.set_fact: + produce_warn: False + when: ansible_version.full is version("2.14", "<") + tags: + - always + +- name: "RedHat | Installing zabbix-agent" + ansible.builtin.package: + pkg: + - "{{ zabbix_agent_package }}-{{ zabbix_agent_version }}.{{ 
zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + tags: + - install + +- name: "RedHat | Installing zabbix-{sender,get}" + ansible.builtin.package: + pkg: + - "{{ zabbix_sender_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + - "{{ zabbix_get_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + when: + - not zabbix_agent_install_agent_only + become: true + tags: + - install + +- name: "RedHat | Enable the service" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + enabled: true + use: service + become: true + tags: + - service diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows.yml new file mode 100644 index 0000000..177db17 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows.yml @@ -0,0 +1,352 @@ +--- +- name: "Windows | Set default architecture" + ansible.builtin.set_fact: + windows_arch: 32 + tags: + - always + +- name: "Windows | Override architecture if 64-bit" + ansible.builtin.set_fact: + windows_arch: 64 + when: + - ansible_architecture == "64-bit" + tags: + - always + +- name: "Windows | Set path to zabbix.exe" + ansible.builtin.set_fact: + zabbix_win_exe_path: '{{ 
zabbix_win_install_dir }}\bin\win{{ windows_arch }}\zabbix_agentd.exe' + tags: + - always + +- name: "Windows | Set variables specific to Zabbix" + ansible.builtin.set_fact: + zabbix_win_svc_name: Zabbix Agent + zabbix_win_exe_path: '{{ zabbix_win_install_dir }}\bin\zabbix_agentd.exe' + zabbix_win_config_name: "zabbix_agentd.conf" + zabbix2_win_svc_name: Zabbix Agent 2 + zabbix2_win_exe_path: '{{ zabbix_win_install_dir }}\bin\zabbix_agent2.exe' + zabbix2_win_config_name: "zabbix_agent2.conf" + tags: + - always + +- name: "Windows | Check if Zabbix agent is present" + ansible.windows.win_stat: + path: "{{ item }}" + with_items: + - "{{ zabbix_win_exe_path }}" + - "{{ zabbix2_win_exe_path }}" + register: agent_file_info + tags: + - always + +- name: "Windows | Get Installed Zabbix Agent Version" + community.windows.win_file_version: + path: "{{ item.item }}" + register: zabbix_win_exe_info + when: + - item.stat.exists | bool + with_items: "{{ agent_file_info.results }}" + tags: + - always + +- name: "Windows | Set facts current zabbix agent installation" + ansible.builtin.set_fact: + zabbix_agent_1_binary_exist: true + zabbix_agent_1_version: zabbix_win_exe_info.results[0].win_file_version.product_version + when: + - zabbix_win_exe_info.results[0] is defined + - zabbix_win_exe_info.results[0].item.stat.exists + - zabbix_win_exe_info.results[0].item.stat.path == zabbix_win_exe_path + - zabbix_win_exe_info.results[0].win_file_version.product_version + tags: + - always + +- name: "Windows | Set facts current zabbix agent installation (agent 2)" + ansible.builtin.set_fact: + zabbix_agent_2_binary_exist: true + zabbix_agent_2_version: zabbix_win_exe_info.results[1].win_file_version.product_version + when: + - zabbix_win_exe_info.results[1] is defined + - zabbix_win_exe_info.results[1].item.stat.exists + - zabbix_win_exe_info.results[1].item.stat.path == zabbix2_win_exe_path + - zabbix_win_exe_info.results[1].win_file_version.product_version + tags: + - always + +- name: 
"Windows | Check Zabbix service" + ansible.windows.win_service: + name: "{{ (item.item.stat.path == zabbix_win_exe_path ) | ternary(zabbix_win_svc_name,zabbix2_win_svc_name) }}" + register: zabbix_service_info + when: item.item.stat.exists + with_items: "{{ zabbix_win_exe_info.results }}" + tags: + - always + +- name: "Windows | Set facts about current zabbix agent service state" + ansible.builtin.set_fact: + zabbix_agent_1_service_exist: true + when: + - zabbix_service_info.results[0].exists is defined + - zabbix_service_info.results[0].exists + - zabbix_service_info.results[0].display_name == zabbix_win_svc_name + tags: + - always + +- name: "Windows | Set facts about current zabbix agent service state (agent 2)" + ansible.builtin.set_fact: + zabbix_agent_2_service_exist: true + when: + - zabbix_service_info.results[1].exists is defined + - zabbix_service_info.results[1].exists + - zabbix_service_info.results[1].display_name == zabbix2_win_svc_name + tags: + - always + +- name: "Windows | Set fact about version change requirement" + ansible.builtin.set_fact: + zabbix_agent_version_change: true + when: > + (zabbix_agent_1_binary_exist | default(false) and + zabbix_win_exe_info.results[0].win_file_version.product_version is version(zabbix_version_long, '<>')) + or + (zabbix_agent_2_binary_exist | default(false) and + zabbix_win_exe_info.results[1].win_file_version.product_version is version(zabbix_version_long, '<>')) + or (zabbix_agent_1_binary_exist | default(false) and zabbix_agent2) + or (zabbix_agent_2_binary_exist | default(false) and not zabbix_agent2) + tags: + - always + +################## +# delete section # +################## + +- name: "Windows | Stop Zabbix agent v1" + ansible.windows.win_service: + name: "{{ zabbix_win_svc_name }}" + start_mode: auto + state: stopped + when: + - zabbix_agent_version_change | default(false) or zabbix_agent2 + - zabbix_agent_1_service_exist | default(false) + +- name: "Windows | Stop Zabbix agent v2" + 
ansible.windows.win_service: + name: "{{ zabbix2_win_svc_name }}" + start_mode: auto + state: stopped + when: + - zabbix_agent_version_change | default(false) or not zabbix_agent2 + - zabbix_agent_2_service_exist | default(false) + +- name: "Windows | Uninstall Zabbix v1" + ansible.windows.win_command: '"{{ zabbix_win_exe_path }}" --config "{{ zabbix_win_install_dir_conf }}\{{ zabbix_win_config_name }}" --uninstall' + when: + - zabbix_agent_version_change | default(false) or zabbix_agent2 + - zabbix_agent_1_service_exist | default(false) + +- name: "Windows | Uninstall Zabbix v2" + ansible.windows.win_command: '"{{ zabbix2_win_exe_path }}" --config "{{ zabbix_win_install_dir_conf }}\{{ zabbix2_win_config_name }}" --uninstall' + when: + - zabbix_agent_version_change | default(false) or not zabbix_agent2 + - zabbix_agent_2_service_exist | default(false) + +- name: "Windows | Removing Zabbix Directory" + ansible.windows.win_file: + path: "{{ zabbix_win_install_dir }}" + state: absent + when: + ((zabbix_agent_version_change | default(false) or zabbix_agent2) and zabbix_agent_1_binary_exist | default(false)) or + ((zabbix_agent_version_change | default(false) or not zabbix_agent2) and zabbix_agent_2_binary_exist | default(false)) + +################### +# install section # +################### + +- name: "Windows | Create directory structure" + ansible.windows.win_file: + path: "{{ item }}" + state: directory + with_items: + - "{{ zabbix_win_install_dir }}" + tags: + - install + +- name: "Windows | Create directory structure, includes" + ansible.windows.win_file: + path: "{{ item }}" + state: directory + with_items: + - "{{ zabbix_agent_win_include }}" + when: + - ('.conf' not in zabbix_agent_win_include) + tags: + - install + +- name: "Windows | Set installation settings (agent 2)" + ansible.builtin.set_fact: + zabbix_win_package: "{{ zabbix2_win_package }}" + zabbix_win_download_link: "{{ zabbix2_win_download_link }}" + zabbix_win_exe_path: "{{ zabbix2_win_exe_path 
}}" + zabbix_win_config_name: "{{ zabbix2_win_config_name }}" + zabbix_win_svc_name: "{{ zabbix2_win_svc_name }}" + when: zabbix_agent2 | bool + tags: + - install + +- name: "Windows | Check if agent file is already downloaded" + ansible.windows.win_stat: + path: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + register: file_info + tags: + - install + +- name: "Windows | Check if agent binaries in place" + ansible.windows.win_stat: + path: "{{ zabbix_win_exe_path }}" + register: zabbix_windows_binaries + tags: + - install + +- name: "Windows | Download Zabbix Agent Zip file" + ansible.windows.win_get_url: + url: "{{ zabbix_win_download_link }}" + dest: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + url_username: "{{ zabbix_download_user | default(omit) }}" + url_password: "{{ zabbix_download_pass | default(omit) }}" + force: false + follow_redirects: all + proxy_url: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + validate_certs: "{{ zabbix_download_validate_certs | default(False) | bool }}" + timeout: "{{ zabbix_download_timeout | default(120) | int }}" + when: + - not file_info.stat.exists + - not zabbix_windows_binaries.stat.exists + register: zabbix_agent_win_download_zip + until: zabbix_agent_win_download_zip is succeeded + throttle: "{{ zabbix_download_throttle | default(5) | int }}" + tags: + - install + +- name: "Windows | Unzip file" + community.windows.win_unzip: + src: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + dest: "{{ zabbix_win_install_dir }}" + creates: "{{ zabbix_win_exe_path }}" + tags: + - install + +- name: "Windows | Cleanup downloaded Zabbix Agent Zip file" + ansible.windows.win_file: + path: '{{ zabbix_win_install_dir }}\{{ zabbix_win_package }}' + state: absent + when: + - zabbix_agent_win_download_zip.changed + tags: + - install + +- name: "Windows | Copy binary files to expected location" + ansible.windows.win_copy: + src: "{{ zabbix_win_install_dir }}\\bin\\{{ item }}" + dest: "{{ 
zabbix_win_install_dir_bin }}\\{{ item }}" + remote_src: yes + loop: + - zabbix_agentd.exe + - zabbix_sender.exe + when: + - zabbix_win_install_dir_bin is defined + - not (zabbix_agent2 | bool) + tags: + - install + +- name: "Windows | Copy binary files to expected location (zabbix-agent2)" + ansible.windows.win_copy: + src: "{{ zabbix_win_install_dir }}\\bin\\{{ item }}" + dest: "{{ zabbix_win_install_dir_bin }}\\{{ item }}" + remote_src: yes + loop: + - zabbix_agent2.exe + when: + - zabbix_win_install_dir_bin is defined + - zabbix_agent2 | bool + tags: + - install + +- set_fact: + zabbix_win_exe_path: "{{ zabbix_win_install_dir_bin }}\\zabbix_agentd.exe" + when: + - zabbix_win_install_dir_bin is defined + - not (zabbix_agent2 | bool) + tags: + - install + +- set_fact: + zabbix_win_exe_path: "{{ zabbix_win_install_dir_bin }}\\zabbix_agent2.exe" + when: + - zabbix_win_install_dir_bin is defined + - zabbix_agent2 | bool + tags: + - install + +- name: "Create directory for PSK file if not exist." 
+ ansible.windows.win_file: + path: "{{ zabbix_agent_tlspskfile | win_dirname }}" + state: directory + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile + - not (zabbix_agent2 | bool) + tags: + - config + +- name: "Create directory for PSK file if not exist (zabbix-agent2)" + ansible.windows.win_file: + path: "{{ zabbix_agent2_tlspskfile | win_dirname }}" + state: directory + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile + - zabbix_agent2 | bool + tags: + - config + +- name: "Place TLS PSK File" + ansible.windows.win_copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspskfile + - zabbix_agent_tlspsk_secret is defined + - not (zabbix_agent2 | bool) + notify: + - restart win zabbix agent + tags: + - config + +- name: "Place TLS PSK File (zabbix-agent2)" + ansible.windows.win_copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspskfile + - zabbix_agent2_tlspsk_secret is defined + - zabbix_agent2 | bool + notify: + - restart win zabbix agent + tags: + - config + +- name: "Windows | Check if windows service exist" + ansible.windows.win_service: + name: "{{ zabbix_win_svc_name }}" + register: zabbix_windows_service + tags: + - service + +- name: "Windows | Register Service" + ansible.windows.win_command: '"{{ zabbix_win_exe_path }}" --config "{{ zabbix_win_install_dir_conf }}\{{ zabbix_win_config_name }}" --install' + when: not zabbix_windows_service.exists + tags: + - service diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows_conf.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows_conf.yml new file mode 100644 index 0000000..c59e3bc --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/Windows_conf.yml @@ -0,0 +1,56 @@ +--- +- name: "Set default ip address for 
zabbix_agent_ip" + ansible.builtin.set_fact: + zabbix_agent_ip: "{{ hostvars[inventory_hostname]['ansible_ip_addresses'] | ansible.utils.ipv4 | first }}" + when: + - zabbix_agent_ip is not defined + - "'ansible_ip_addresses' in hostvars[inventory_hostname]" + tags: + - config + +- name: "Windows | Configure zabbix-agent" + ansible.windows.win_template: + src: "{{ zabbix_win_config_name }}.j2" + dest: "{{ zabbix_win_install_dir_conf }}\\{{ zabbix_win_config_name }}" + notify: restart win zabbix agent + tags: + - config + +- name: "Windows | Set service startup mode to auto, ensure it is started and set auto-recovery" + ansible.windows.win_service: + name: "{{ zabbix_win_svc_name }}" + start_mode: auto + state: started + failure_actions: + - type: restart + delay_ms: 5000 + - type: restart + delay_ms: 10000 + - type: restart + delay_ms: 20000 + failure_reset_period_sec: 86400 + tags: + - config + +- name: "Windows | Check firewall service" + ansible.windows.win_service_info: + name: MpsSvc + register: firewall_info + when: zabbix_win_firewall_management + tags: + - config + +- name: "Windows | Firewall rule" + community.windows.win_firewall_rule: + name: "{{ zabbix_win_svc_name }}" + localport: "{{ zabbix_agent_listenport }}" + action: allow + direction: in + protocol: tcp + state: present + enabled: true + when: + - zabbix_win_firewall_management + - firewall_info.services[0].state == 'started' or firewall_info.services[0].start_mode == 'auto' + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/XCP-ng.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/XCP-ng.yml new file mode 100644 index 0000000..c85d8e5 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/XCP-ng.yml @@ -0,0 +1,70 @@ +--- +# Tasks specific for RedHat systems + +- name: "RedHat | Install basic repo file" + ansible.builtin.yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + gpgcheck: "{{ 
item.gpgcheck }}" + gpgkey: "{{ item.gpgkey }}" + mode: "{{ item.mode | default('0644') }}" + priority: "{{ item.priority | default('99') }}" + state: "{{ item.state | default('present') }}" + proxy: "{{ zabbix_http_proxy | default(omit) }}" + with_items: "{{ zabbix_repo_yum }}" + register: yum_repo_installed + become: true + notify: + - "clean repo files from proxy creds" + tags: + - install + +- name: Check if warn parameter can be used for shell module + ansible.builtin.set_fact: + produce_warn: False + when: ansible_version.full is version("2.14", "<") + tags: + - always + +- name: "RedHat | Installing zabbix-agent" + ansible.builtin.package: + pkg: + - "{{ zabbix_agent_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + become: true + tags: + - install + +- name: "RedHat | Installing zabbix-{sender,get}" + ansible.builtin.package: + pkg: + - "{{ zabbix_sender_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + - "{{ zabbix_get_package }}-{{ zabbix_agent_version }}.{{ zabbix_agent_version_minor }}" + disablerepo: "{{ zabbix_agent_disable_repo | default(omit) }}" + state: "{{ zabbix_agent_package_state }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_package_installed + until: zabbix_agent_package_installed is succeeded + when: + - not zabbix_agent_install_agent_only + become: true + tags: + - install + +- name: "RedHat | Enable the service" + ansible.builtin.service: + name: "{{ zabbix_agent_service }}" + enabled: true + 
use: service + become: true + tags: + - service diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/api.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/api.yml new file mode 100644 index 0000000..3487971 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/api.yml @@ -0,0 +1,96 @@ +--- +- name: "API | Create host groups" + community.zabbix.zabbix_group: + host_group: "{{ zabbix_host_groups }}" + state: "{{ zabbix_agent_hostgroups_state }}" + when: + - zabbix_api_create_hostgroup | bool + register: zabbix_api_hostgroup_created + until: zabbix_api_hostgroup_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + tags: + - api + +- name: "API | Create a new host or update an existing host's info" + community.zabbix.zabbix_host: + host_name: "{{ zabbix_agent_hostname }}" + host_groups: "{{ zabbix_host_groups }}" + link_templates: "{{ zabbix_agent_link_templates }}" + status: "{{ zabbix_host_status }}" + state: "{{ zabbix_agent_host_state }}" + force: "{{ zabbix_agent_host_update }}" + proxy: "{{ zabbix_agent_proxy }}" + inventory_mode: "{{ zabbix_agent_inventory_mode }}" + interfaces: "{{ zabbix_agent_interfaces }}" + visible_name: "{{ zabbix_agent_visible_hostname | default(zabbix_agent_hostname) }}" + tls_psk: "{{ zabbix_agent_tlspsk_secret | default(omit) }}" + tls_psk_identity: "{{ zabbix_agent_tlspskidentity | default(omit) }}" + tls_issuer: "{{ zabbix_agent_tlsservercertissuer | default(omit) }}" + tls_subject: "{{ zabbix_agent_tls_subject | default(omit) }}" + tls_accept: "{{ zabbix_agent_tls_config[zabbix_agent_tlsaccept if zabbix_agent_tlsaccept else 'unencrypted'] }}" + tls_connect: "{{ zabbix_agent_tls_config[zabbix_agent_tlsconnect if zabbix_agent_tlsconnect else 'unencrypted'] }}" + description: "{{ zabbix_agent_description | default(omit) }}" + inventory_zabbix: "{{ zabbix_agent_inventory_zabbix | default({}) }}" + ipmi_authtype: "{{ zabbix_agent_ipmi_authtype | default(omit) }}" + ipmi_password: "{{ 
zabbix_agent_ipmi_password| default(omit) }}" + ipmi_privilege: "{{ zabbix_agent_ipmi_privilege | default(omit) }}" + ipmi_username: "{{ zabbix_agent_ipmi_username | default(omit) }}" + tags: "{{ zabbix_agent_tags }}" + when: + - not zabbix_agent2 + register: zabbix_api_host_created + until: zabbix_api_host_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + changed_when: false + tags: + - api + +- name: "API | Create a new host using agent2 or update an existing host's info" + community.zabbix.zabbix_host: + host_name: "{{ zabbix_agent2_hostname }}" + host_groups: "{{ zabbix_host_groups }}" + link_templates: "{{ zabbix_agent_link_templates }}" + status: "{{ zabbix_host_status }}" + state: "{{ zabbix_agent_host_state }}" + force: "{{ zabbix_agent_host_update }}" + proxy: "{{ zabbix_agent_proxy }}" + inventory_mode: "{{ zabbix_agent_inventory_mode }}" + interfaces: "{{ zabbix_agent_interfaces }}" + visible_name: "{{ zabbix_agent_visible_hostname | default(zabbix_agent2_hostname) }}" + tls_psk: "{{ zabbix_agent2_tlspsk_secret | default(omit) }}" + tls_psk_identity: "{{ zabbix_agent2_tlspskidentity | default(omit) }}" + tls_issuer: "{{ zabbix_agent2_tlsservercertissuer | default(omit) }}" + tls_subject: "{{ zabbix_agent2_tls_subject | default(omit) }}" + tls_accept: "{{ zabbix_agent_tls_config[zabbix_agent2_tlsaccept if zabbix_agent2_tlsaccept else 'unencrypted'] }}" + tls_connect: "{{ zabbix_agent_tls_config[zabbix_agent2_tlsconnect if zabbix_agent2_tlsconnect else 'unencrypted'] }}" + description: "{{ zabbix_agent_description | default(omit) }}" + inventory_zabbix: "{{ zabbix_agent_inventory_zabbix | default({}) }}" + ipmi_authtype: "{{ zabbix_agent_ipmi_authtype | default(omit) }}" + ipmi_password: "{{ zabbix_agent_ipmi_password| default(omit) }}" + ipmi_privilege: "{{ zabbix_agent_ipmi_privilege | default(omit) }}" + ipmi_username: "{{ zabbix_agent_ipmi_username | default(omit) }}" + tags: "{{ zabbix_agent_tags }}" + when: + - zabbix_agent2 | bool 
+ register: zabbix_api_host_created + until: zabbix_api_host_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + changed_when: false + tags: + - api + +- name: "API | Updating host configuration with macros" + community.zabbix.zabbix_hostmacro: + host_name: "{{ (zabbix_agent2 | bool) | ternary(zabbix_agent2_hostname, zabbix_agent_hostname) }}" + macro_name: "{{ item.macro_key }}" + macro_value: "{{ item.macro_value }}" + macro_type: "{{ item.macro_type|default('text') }}" + with_items: "{{ zabbix_agent_macros | default([]) }}" + when: + - zabbix_agent_macros is defined + - item.macro_key is defined + register: zabbix_api_hostmarcro_created + until: zabbix_api_hostmarcro_created is succeeded + delegate_to: "{{ zabbix_api_server_host }}" + tags: + - api diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/macOS.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/macOS.yml new file mode 100644 index 0000000..f8fd97b --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/macOS.yml @@ -0,0 +1,22 @@ +--- +# Tasks specific for macOS +- name: "macOS | Check installed package version" + ansible.builtin.shell: | + set -o pipefail + pkgutil --pkg-info 'com.zabbix.pkg.ZabbixAgent' | grep 'version:' | cut -d ' ' -f 2 + register: pkgutil_version + check_mode: false + changed_when: false + failed_when: pkgutil_version.rc == 2 + +- name: "macOS | Download the Zabbix package" + ansible.builtin.get_url: + url: "{{ zabbix_mac_download_link }}" + dest: "/tmp/{{ zabbix_mac_package }}" + mode: 0644 + when: pkgutil_version.stdout != zabbix_version_long + +- name: "macOS | Install the Zabbix package" + ansible.builtin.command: installer -pkg "/tmp/{{ zabbix_mac_package }}" -target / + become: true + when: pkgutil_version.stdout != zabbix_version_long diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/main.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/main.yml new file mode 100644 index 0000000..5b12ec6 --- /dev/null +++ 
b/ansible/zabbix_agent/roles/zabbix-agent/tasks/main.yml @@ -0,0 +1,94 @@ +--- +# tasks file for zabbix_agent +- name: "Include OS-specific variables" + ansible.builtin.include_vars: "{{ ansible_os_family }}.yml" + tags: + - always + +- name: Determine Latest Supported Zabbix Version + ansible.builtin.set_fact: + zabbix_agent_version: "{{ zabbix_valid_agent_versions[ansible_distribution_major_version][0] | default(6.4) }}" + when: zabbix_agent_version is not defined or zabbix_agent_version is none + tags: + - always + +- name: Set More Variables + ansible.builtin.set_fact: + zabbix_valid_version: "{{ zabbix_agent_version|float in zabbix_valid_agent_versions[ansible_distribution_major_version] }}" + tags: + - always + +- name: Stopping Install of Invalid Version + ansible.builtin.fail: + msg: Zabbix version {{ zabbix_agent_version }} is not supported on {{ ansible_distribution }} {{ ansible_distribution_major_version }} + when: not zabbix_valid_version + tags: + - always + +- name: Setting Zabbix API Server Port + ansible.builtin.set_fact: + zabbix_api_server_port: "{{ '443' if zabbix_api_use_ssl|bool else '80' }}" + when: zabbix_api_server_port is undefined + +- name: "Set variables specific for Zabbix Agent 2" + ansible.builtin.set_fact: + zabbix_agent_service: zabbix-agent2 + zabbix_agent_package: zabbix-agent2 + when: + - zabbix_agent2 is defined + - zabbix_agent2 + tags: + - always + +- name: "Install the correct repository" + ansible.builtin.include_tasks: "{{ ansible_os_family }}.yml" + when: + - not (zabbix_agent_docker | bool) + +- name: "Encrypt with TLS PSK auto management" + ansible.builtin.include_tasks: tlspsk_auto.yml + when: + - not zabbix_agent2 + - zabbix_agent_tlspsk_auto | bool + - (zabbix_agent_tlspskfile is undefined) or (zabbix_agent_tlspskfile | length == '0') + - (zabbix_agent_tlspsk_secret is undefined) or (zabbix_agent_tlspsk_secret | length == '0') + +- name: "Encrypt with TLS PSK auto management" + ansible.builtin.include_tasks: 
tlspsk_auto_agent2.yml + when: + - zabbix_agent2 | bool + - zabbix_agent2_tlspsk_auto | bool + - (zabbix_agent2_tlspskfile is undefined) or (zabbix_agent2_tlspskfile | length == '0') + - (zabbix_agent2_tlspsk_secret is undefined) or (zabbix_agent2_tlspsk_secret | length == '0') + +- name: "Configure Agent" + ansible.builtin.include_tasks: Windows_conf.yml + when: + - ansible_os_family == "Windows" + +- name: "Configure Agent" + ansible.builtin.include_tasks: Linux.yml + when: + - (ansible_os_family != "Windows" and ansible_os_family != "Darwin") or (zabbix_agent_docker | bool) + +- name: "Run the API calls to Zabbix Server" + vars: + gather_facts: false + ansible_user: "{{ zabbix_api_login_user }}" + ansible_httpapi_use_ssl: "{{ zabbix_api_use_ssl }}" + ansible_network_os: community.zabbix.zabbix + ansible_connection: httpapi + # Can't think of a way to make http_login_* vars be undefined -( + http_login_user: "{{ zabbix_api_http_user | default(-42) }}" + http_login_password: "{{ zabbix_api_http_password | default(-42) }}" + ansible.builtin.include_tasks: api.yml + when: + - (zabbix_api_create_hostgroup | bool) or (zabbix_api_create_hosts | bool) + tags: + - api + +- name: "Including userparameters" + ansible.builtin.include_tasks: "userparameter.yml" + when: zabbix_agent_userparameters|length > 0 + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/remove.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/remove.yml new file mode 100644 index 0000000..f825067 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/remove.yml @@ -0,0 +1,25 @@ +--- +- name: Pull service facts + ansible.builtin.service_facts: + +- name: 'Remove | Make sure the "old" zabbix-agent service stopped' + ansible.builtin.service: + name: "zabbix-agent" + state: stopped + enabled: false + become: true + when: | + ansible_facts.services["zabbix-agent.service"] is defined or + ansible_facts.services["zabbix-agent"] is defined + +- name: "Remove | Package 
removal" + ansible.builtin.package: + name: "zabbix-agent" + state: absent + become: true + +- name: "Remove | Remove the agent-include-dir" + ansible.builtin.file: + path: "{{ zabbix_agent_include }}" + state: absent + become: true diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/selinux.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/selinux.yml new file mode 100644 index 0000000..ca29e77 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/selinux.yml @@ -0,0 +1,110 @@ +--- +- name: "SELinux | Debian | Install policycoreutils-python" + ansible.builtin.apt: + pkg: policycoreutils-python-utils + state: present + update_cache: true + cache_valid_time: 0 + force_apt_get: "{{ zabbix_apt_force_apt_get }}" + install_recommends: "{{ zabbix_apt_install_recommends }}" + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_policycoreutils_installed + until: zabbix_agent_package_installed is succeeded + become: true + when: + - ansible_os_family == "Debian" + tags: + - install + +- name: "SELinux | RedHat | Install policycoreutils-python" + ansible.builtin.package: + name: policycoreutils-python + state: installed + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_policycoreutils_installed + until: zabbix_agent_policycoreutils_installed is succeeded + when: + - ansible_os_family == "RedHat" + - (zabbix_agent_distribution_major_version == "6" or zabbix_agent_distribution_major_version == "7") + become: true + tags: + - install + +- name: "SELinux | RedHat | Install python3-policycoreutils on RHEL8" + ansible.builtin.package: + name: python3-policycoreutils + state: installed + environment: + http_proxy: "{{ zabbix_http_proxy | default(None) | default(omit) }}" + https_proxy: "{{ 
zabbix_https_proxy | default(None) | default(omit) }}" + register: zabbix_agent_policycoreutils_installed + until: zabbix_agent_policycoreutils_installed is succeeded + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version == "8" + become: true + tags: + - install + +- name: "SELinux | RedHat | Install selinux-policy-targeted" + ansible.builtin.package: + name: selinux-policy-targeted + state: installed + register: zabbix_agent_selinuxpolicytargeted_installed + until: zabbix_agent_selinuxpolicytargeted_installed is succeeded + when: + - ansible_os_family == "RedHat" + become: true + tags: + - install + +# straight to getenforce binary , workaround for missing python_selinux library +- name: "SELinux | Get getenforce binary" + ansible.builtin.stat: + path: /usr/sbin/getenforce + register: getenforce_bin + become: true + tags: + - always + +- name: "SELinux | Collect getenforce output" + ansible.builtin.command: /usr/sbin/getenforce + register: sestatus + when: "getenforce_bin.stat.exists" + changed_when: false + become: true + check_mode: false + tags: + - always + +- name: "SELinux | Set zabbix_selinux to true if getenforce returns Enforcing or Permissive" + ansible.builtin.set_fact: + zabbix_selinux: "{{ true }}" + when: + - 'getenforce_bin.stat.exists and ("Enforcing" in sestatus.stdout or "Permissive" in sestatus.stdout)' + tags: + - always + +- name: "SELinux | Allow zabbix_agent to start (SELinux)" + community.general.selinux_permissive: + name: zabbix_agent_t + permissive: true + become: true + tags: + - config + +- name: "SELinux | Allow zabbix to run sudo commands (SELinux)" + ansible.posix.seboolean: + name: zabbix_run_sudo + persistent: true + state: true + become: true + when: + - ansible_selinux.status == "enabled" + - selinux_allow_zabbix_run_sudo|bool + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto.yml new file mode 100644 
index 0000000..6a1870e --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto.yml @@ -0,0 +1,14 @@ +--- +- ansible.builtin.include_tasks: tlspsk_auto_linux.yml + when: (ansible_os_family != "Windows") or (zabbix_agent_docker | bool) + +- ansible.builtin.include_tasks: tlspsk_auto_windows.yml + when: ansible_os_family == "Windows" + +- name: AutoPSK | Default tlsaccept and tlsconnect to enforce PSK + ansible.builtin.set_fact: + zabbix_agent_tlsaccept: psk + zabbix_agent_tlsconnect: psk + when: zabbix_api_create_hosts + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml new file mode 100644 index 0000000..6dc4ec6 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2.yml @@ -0,0 +1,14 @@ +--- +- include_tasks: tlspsk_auto_agent2_linux.yml + when: (ansible_os_family != "Windows") or (zabbix_agent_docker | bool) + +- include_tasks: tlspsk_auto_agent2_windows.yml + when: ansible_os_family == "Windows" + +- name: AutoPSK | Default tlsaccept and tlsconnect to enforce PSK + ansible.builtin.set_fact: + zabbix_agent2_tlsaccept: psk + zabbix_agent2_tlsconnect: psk + when: zabbix_api_create_hosts + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml new file mode 100644 index 0000000..436eb42 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_common.yml @@ -0,0 +1,53 @@ +--- +# Process PSK Secret +- name: AutoPSK | Save existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent2_tlspsk_read: "{{ zabbix_agent2_tlspsk_base64['content'] | b64decode | trim }}" + when: zabbix_agent2_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Use existing TLS PSK secret + ansible.builtin.set_fact: + 
zabbix_agent2_tlspsk_secret: "{{ zabbix_agent2_tlspsk_read }}" + when: + - zabbix_agent2_tlspskcheck.stat.exists + - zabbix_agent2_tlspsk_read|length >= 32 + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent2_tlspsk_secret: "{{ lookup('password', '/dev/null chars=hexdigits length=64') }}" + when: + - not zabbix_agent2_tlspskcheck.stat.exists + - (zabbix_agent2_tlspsk_read is not defined) or (zabbix_agent2_tlspsk_read|length < 32) + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +# Process PSK Identity +- name: AutoPSK | Use existing TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent2_tlspskidentity: "{{ zabbix_agent2_tlspskidentity_base64['content'] | b64decode | trim }}" + when: + - zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent2_tlspskidentity: >- + {{ + zabbix_agent_visible_hostname + | default(((zabbix_agent2 == True) | ternary(zabbix_agent2_hostname, zabbix_agent_hostname))) + + '_' + + lookup('password', '/dev/null chars=hexdigits length=4') + }} + when: not zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml new file mode 100644 index 0000000..98fa652 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_linux.yml @@ -0,0 +1,80 @@ +--- +- name: AutoPSK | Set default path variables (Linux) + ansible.builtin.set_fact: + zabbix_agent2_tlspskfile: "/etc/zabbix/tls_psk_auto.secret" + zabbix_agent2_tlspskidentity_file: "/etc/zabbix/tls_psk_auto.identity" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Linux) + 
ansible.builtin.stat: + path: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspskcheck + become: true + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_check + become: true + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspsk_base64 + become: true + when: + - zabbix_agent2_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_base64 + become: true + when: zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- include_tasks: tlspsk_auto_agent2_common.yml + +- name: AutoPSK | Template TLS PSK identity in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent2_tlspskidentity_file }}" + content: "{{ zabbix_agent2_tlspskidentity }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent2_tlspskidentity_file is defined + - zabbix_agent2_tlspskidentity is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspsk_secret is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml 
b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml new file mode 100644 index 0000000..2549249 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_agent2_windows.yml @@ -0,0 +1,66 @@ +--- +- name: AutoPSK | Set default path variables for Windows + ansible.builtin.set_fact: + zabbix_agent2_tlspskfile: "{{ zabbix_win_install_dir }}\\tls_psk_auto.secret.txt" + zabbix_agent2_tlspskidentity_file: "{{ zabbix_win_install_dir }}\\tls_psk_auto.identity.txt" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspskcheck + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_check + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskfile }}" + register: zabbix_agent2_tlspsk_base64 + when: + - zabbix_agent2_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent2_tlspskidentity_file }}" + register: zabbix_agent2_tlspskidentity_base64 + when: zabbix_agent2_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- ansible.builtin.include_tasks: tlspsk_auto_agent2_common.yml + +- name: Windows | AutoPSK | Template TLS PSK identity in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent2_tlspskidentity_file }}" + content: "{{ zabbix_agent2_tlspskidentity }}" + when: + - zabbix_agent2_tlspskidentity_file is defined + - zabbix_agent2_tlspskidentity is defined + notify: + - restart win zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Windows) + 
ansible.windows.win_copy: + dest: "{{ zabbix_agent2_tlspskfile }}" + content: "{{ zabbix_agent2_tlspsk_secret }}" + when: + - zabbix_agent2_tlspskfile is defined + - zabbix_agent2_tlspsk_secret is defined + notify: + - restart win zabbix agent + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_common.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_common.yml new file mode 100644 index 0000000..a933692 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_common.yml @@ -0,0 +1,52 @@ +--- +# Process PSK Secret +- name: AutoPSK | Save existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent_tlspsk_read: "{{ zabbix_agent_tlspsk_base64['content'] | b64decode | trim }}" + when: zabbix_agent_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Use existing TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent_tlspsk_secret: "{{ zabbix_agent_tlspsk_read }}" + when: + - zabbix_agent_tlspskcheck.stat.exists + - zabbix_agent_tlspsk_read|length >= 32 + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK secret + ansible.builtin.set_fact: + zabbix_agent_tlspsk_secret: "{{ lookup('password', '/dev/null chars=hexdigits length=64') }}" + when: + - (not zabbix_agent_tlspskcheck.stat.exists) or (zabbix_agent_tlspsk_read|length < 32) + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +# Process PSK Identity +- name: AutoPSK | Use existing TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent_tlspskidentity: "{{ zabbix_agent_tlspskidentity_base64['content'] | b64decode | trim }}" + when: + - zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Generate new TLS PSK identity + ansible.builtin.set_fact: + zabbix_agent_tlspskidentity: >- + {{ + zabbix_agent_visible_hostname + | default(((zabbix_agent2 != True) | 
ternary(zabbix_agent_hostname, zabbix_agent_hostname))) + + '_' + + lookup('password', '/dev/null chars=hexdigits length=4') + }} + when: not zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml new file mode 100644 index 0000000..906ccb0 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_linux.yml @@ -0,0 +1,80 @@ +--- +- name: AutoPSK | Set default path variables (Linux) + ansible.builtin.set_fact: + zabbix_agent_tlspskfile: "/etc/zabbix/tls_psk_auto.secret" + zabbix_agent_tlspskidentity_file: "/etc/zabbix/tls_psk_auto.identity" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspskcheck + become: true + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Linux) + ansible.builtin.stat: + path: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_check + become: true + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspsk_base64 + become: true + when: + - zabbix_agent_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file (Linux) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_base64 + become: true + when: zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- include_tasks: tlspsk_auto_common.yml + +- name: AutoPSK | Template TLS PSK identity in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent_tlspskidentity_file }}" + content: "{{ 
zabbix_agent_tlspskidentity }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent_tlspskidentity_file is defined + - zabbix_agent_tlspskidentity is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Linux) + ansible.builtin.copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + owner: zabbix + group: zabbix + mode: 0400 + become: true + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspsk_secret is defined + notify: + - restart zabbix-agent + - restart mac zabbix agent + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml new file mode 100644 index 0000000..db2aedf --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/tlspsk_auto_windows.yml @@ -0,0 +1,67 @@ +--- +- name: AutoPSK | Set default path variables for Windows + ansible.builtin.set_fact: + zabbix_agent_tlspskfile: "{{ zabbix_win_install_dir }}\\tls_psk_auto.secret.txt" + zabbix_agent_tlspskidentity_file: "{{ zabbix_win_install_dir }}\\tls_psk_auto.identity.txt" + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK file (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspskcheck + tags: + - config + +- name: AutoPSK | Check for existing TLS PSK identity (Windows) + ansible.windows.win_stat: + path: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_check + tags: + - config + +- name: AutoPSK | read existing TLS PSK file (Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskfile }}" + register: zabbix_agent_tlspsk_base64 + when: + - zabbix_agent_tlspskcheck.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- name: AutoPSK | Read existing TLS PSK identity file 
(Windows) + ansible.builtin.slurp: + src: "{{ zabbix_agent_tlspskidentity_file }}" + register: zabbix_agent_tlspskidentity_base64 + when: zabbix_agent_tlspskidentity_check.stat.exists + no_log: "{{ ansible_verbosity < 3 }}" + tags: + - config + +- include_tasks: tlspsk_auto_common.yml + +- name: AutoPSK | Template TLS PSK identity in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent_tlspskidentity_file }}" + content: "{{ zabbix_agent_tlspskidentity }}" + when: + - zabbix_agent_tlspskidentity_file is defined + - zabbix_agent_tlspskidentity is defined + notify: + - restart win zabbix agent + tags: + - config + +- name: AutoPSK | Template TLS PSK secret in file (Windows) + ansible.windows.win_copy: + dest: "{{ zabbix_agent_tlspskfile }}" + content: "{{ zabbix_agent_tlspsk_secret }}" + when: + - zabbix_agent_tlspskfile is defined + - zabbix_agent_tlspsk_secret is defined + - ansible_os_family == "Windows" + notify: + - restart win zabbix agent + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/tasks/userparameter.yml b/ansible/zabbix_agent/roles/zabbix-agent/tasks/userparameter.yml new file mode 100644 index 0000000..c683f9e --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/tasks/userparameter.yml @@ -0,0 +1,87 @@ +--- +- block: + - name: "Windows | Installing user-defined userparameters" + ansible.windows.win_template: + src: "{{ zabbix_agent_userparameters_templates_src }}/{{ item.name }}.j2" + dest: '{{ zabbix_agent_win_include }}\{{ item.name }}.conf' + notify: + - restart win zabbix agent + with_items: "{{ zabbix_agent_userparameters }}" + + - name: "Windows | Installing user-defined scripts" + ansible.windows.win_copy: + src: "{{ zabbix_agent_userparameters_scripts_src }}/{{ item.scripts_dir }}" + dest: '{{ zabbix_win_install_dir }}\scripts\' + notify: + - restart win zabbix agent + with_items: "{{ zabbix_agent_userparameters }}" + when: item.scripts_dir is defined + when: ansible_os_family == "Windows" + tags: + - 
config + +- block: + - name: "Installing user-defined userparameters" + ansible.builtin.template: + src: "{{ zabbix_agent_userparameters_templates_src }}/{{ item.name }}.j2" + dest: "{{ zabbix_agent_include }}/userparameter_{{ item.name }}.conf" + owner: zabbix + group: zabbix + mode: 0644 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + + - name: "Installing user-defined scripts" + ansible.builtin.copy: + src: "{{ zabbix_agent_userparameters_scripts_src }}/{{ item.scripts_dir }}" + dest: "/etc/zabbix/scripts/" + owner: zabbix + group: zabbix + mode: 0755 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + when: item.scripts_dir is defined + when: + - ansible_os_family != "Windows" + - not zabbix_agent2 + tags: + - config + +- block: + - name: "Installing user-defined userparameters" + ansible.builtin.template: + src: "{{ zabbix_agent_userparameters_templates_src }}/{{ item.name }}.j2" + dest: "{{ zabbix_agent2_include }}/userparameter_{{ item.name }}.conf" + owner: zabbix + group: zabbix + mode: 0644 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + + - name: "Installing user-defined scripts" + ansible.builtin.copy: + src: "{{ zabbix_agent_userparameters_scripts_src }}/{{ item.scripts_dir }}" + dest: "/etc/zabbix/scripts/" + owner: zabbix + group: zabbix + mode: 0755 + notify: + - restart zabbix-agent + - restart mac zabbix agent + become: true + with_items: "{{ zabbix_agent_userparameters }}" + when: item.scripts_dir is defined + when: + - ansible_os_family != "Windows" + - zabbix_agent2 + tags: + - config diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 new file mode 100644 index 
0000000..517ff71 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_iac_pass_failed.j2 @@ -0,0 +1 @@ +UserParameter=dev2_iac_pass_failed,pam_tally2 -u dev2-iac | awk '/Failures/ {getline; print $2}' \ No newline at end of file diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 new file mode 100644 index 0000000..2e3c3f5 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/dev2_pass_failed.j2 @@ -0,0 +1 @@ +UserParameter=dev2_pass_failed,pam_tally2 -u dev2 | awk '/Failures/ {getline; print $2}' \ No newline at end of file diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/mysql.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/mysql.j2 new file mode 100644 index 0000000..70df285 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/mysql.j2 @@ -0,0 +1,3 @@ +# This is a sample userparameters file. 
+ +UserParameter=mysql.ping_to,mysqladmin -uroot ping | grep -c alive diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 new file mode 100644 index 0000000..1526e1b --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/root_pass_failed.j2 @@ -0,0 +1 @@ +UserParameter=root_pass_failed,pam_tally2 -u root | awk '/Failures/ {getline; print $2}' \ No newline at end of file diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/win_sample.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/win_sample.j2 new file mode 100644 index 0000000..c144e46 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/win_sample.j2 @@ -0,0 +1 @@ +UserParameter=do.something, powershell -NoProfile -ExecutionPolicy Bypass -File {{ zabbix_win_install_dir }}\scripts\{{ item.name }}\doSomething.ps1 diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombie.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombie.j2 new file mode 100644 index 0000000..055ec98 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombie.j2 @@ -0,0 +1 @@ +UserParameter=zombie.count,ps -ef | grep defunct | egrep -v grep | wc -l \ No newline at end of file diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombielist.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombielist.j2 new file mode 100644 index 0000000..ff14c89 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/userparameters/zombielist.j2 @@ -0,0 +1 @@ +UserParameter=zombie.list,ps -ef | grep defunct | egrep -v grep \ No newline at end of file diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 
b/ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 new file mode 100644 index 0000000..bbdfd26 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agent2.conf.j2 @@ -0,0 +1,140 @@ +{{ ansible_managed | comment }} +# This is a configuration file for Zabbix Agent 2 +# To get more information about Zabbix, visit http://www.zabbix.com + +# This configuration file is "minimalized", which means all the original comments +# are removed. The full documentation for your Zabbix Agent 2 can be found here: +# https://www.zabbix.com/documentation/{{ zabbix_agent_version }}/en/manual/appendix/config/zabbix_agent2{{ "_win" if ansible_os_family == "Windows" else "" }} + +{% if ansible_os_family != "Windows" %} +PidFile={{ zabbix_agent2_pidfile }} +{% endif %} +LogType={{ zabbix_agent2_logtype }} +{% if ansible_os_family == "Windows" %} +LogFile={{ zabbix_agent2_win_logfile }} +{% else %} +LogFile={{ zabbix_agent2_logfile }} +{% endif %} +LogFileSize={{ zabbix_agent2_logfilesize }} +DebugLevel={{ zabbix_agent2_debuglevel }} +{% if zabbix_agent2_sourceip is defined and zabbix_agent2_sourceip %} +SourceIP={{ zabbix_agent2_sourceip }} +{% endif %} +Server={{ zabbix_agent2_server }} +ListenPort={{ zabbix_agent2_listenport }} +{% if zabbix_agent2_listenip is defined and zabbix_agent2_listenip !='0.0.0.0' and zabbix_agent2_listenip %} +ListenIP={{ zabbix_agent2_listenip }} +{% endif %} +{% if zabbix_agent2_statusport is defined and zabbix_agent2_statusport %} +StatusPort={{ zabbix_agent2_statusport }} +{% endif %} +ServerActive={{ zabbix_agent2_serveractive }} +{% if zabbix_agent2_hostname is defined and zabbix_agent2_hostname %} +Hostname={{ zabbix_agent2_hostname }} +{% endif %} +{% if zabbix_agent2_hostnameitem is defined and zabbix_agent2_hostnameitem %} +HostnameItem={{ zabbix_agent2_hostnameitem }} +{% endif %} +{% if zabbix_agent2_hostmetadata is defined and zabbix_agent2_hostmetadata %} +HostMetadata={{ zabbix_agent2_hostmetadata }} 
+{% endif %} +{% if zabbix_agent2_hostmetadataitem is defined and zabbix_agent2_hostmetadataitem %} +HostMetadataItem={{ zabbix_agent2_hostmetadataitem }} +{% endif %} +{% if zabbix_agent2_hostinterface is defined and zabbix_agent2_hostinterface %} +HostInterface={{ zabbix_agent2_hostinterface }} +{% endif %} +{% if zabbix_agent2_hostinterfaceitem is defined and zabbix_agent2_hostinterfaceitem %} +HostInterfaceItem={{ zabbix_agent2_hostinterfaceitem }} +{% endif %} +{% if zabbix_agent2_allow_key is defined and zabbix_agent2_allow_key %} +{% for item in zabbix_agent2_allow_key %} +AllowKey={{ item }} +{% endfor %} +{% endif %} +{% if zabbix_agent2_deny_key is defined and zabbix_agent2_deny_key %} +{% for item in zabbix_agent2_deny_key %} +DenyKey={{ item }} +{% endfor %} +{% endif %} +RefreshActiveChecks={{ zabbix_agent2_refreshactivechecks }} +BufferSend={{ zabbix_agent2_buffersend }} +BufferSize={{ zabbix_agent2_buffersize }} +{% if zabbix_agent2_enablepersistentbuffer is defined and zabbix_agent2_enablepersistentbuffer %} +EnablePersistentBuffer={{ zabbix_agent2_enablepersistentbuffer }} +{% endif %} +{% if zabbix_agent2_persistentbufferperiod is defined and zabbix_agent2_persistentbufferperiod %} +PersistentBufferPeriod={{ zabbix_agent2_persistentbufferperiod }} +{% endif %} +{% if zabbix_agent2_persistentbufferfile is defined and zabbix_agent2_persistentbufferfile %} +PersistentBufferFile={{ zabbix_agent2_persistentbufferfile }} +{% endif %} +{% if zabbix_agent2_zabbix_alias is defined and zabbix_agent2_zabbix_alias %} +{% if zabbix_agent2_zabbix_alias is string %} +Alias={{ zabbix_agent2_zabbix_alias }} +{% else %} +{% for item in zabbix_agent2_zabbix_alias %} +Alias={{ item }} +{% endfor %} +{% endif %} +{% endif %} +Timeout={{ zabbix_agent2_timeout }} +{% if ansible_os_family == "Windows" %} +Include={{ zabbix_agent_win_include }} +{% else %} +Include={{ zabbix_agent2_include }}/{{ zabbix_agent2_include_pattern }} +{% endif %} +{% if 
zabbix_agent2_additional_include is defined and zabbix_agent2_additional_include is iterable and zabbix_agent2_additional_include is not string %} +{% for include in zabbix_agent2_additional_include %} +Include={{ include }} +{% endfor %} +{% endif %} +UnsafeUserParameters={{ zabbix_agent2_unsafeuserparameters }} +{% if ansible_os_family != "Windows" %} +ControlSocket={{ zabbix_agent2_controlsocket }} +{% endif %} +{% if zabbix_agent2_tlsconnect is defined and zabbix_agent2_tlsconnect %} +TLSConnect={{ zabbix_agent2_tlsconnect }} +{% endif %} +{% if zabbix_agent2_tlsaccept is defined and zabbix_agent2_tlsaccept %} +TLSAccept={{ zabbix_agent2_tlsaccept }} +{% endif %} +{% if zabbix_agent2_tlscafile is defined and zabbix_agent2_tlscafile %} +TLSCAFile={{ zabbix_agent2_tlscafile }} +{% endif %} +{% if zabbix_agent2_tlscrlfile is defined and zabbix_agent2_tlscrlfile %} +TLSCRLFile={{ zabbix_agent2_tlscrlfile }} +{% endif %} +{% if zabbix_agent2_tlsservercertissuer is defined and zabbix_agent2_tlsservercertissuer %} +TLSServerCertIssuer={{ zabbix_agent2_tlsservercertissuer }} +{% endif %} +{% if zabbix_agent2_tlsservercertsubject is defined and zabbix_agent2_tlsservercertsubject %} +TLSServerCertSubject={{ zabbix_agent2_tlsservercertsubject }} +{% endif %} +{% if zabbix_agent2_tlscertfile is defined and zabbix_agent2_tlscertfile %} +TLSCertFile={{ zabbix_agent2_tlscertfile }} +{% endif %} +{% if zabbix_agent2_tlskeyfile is defined and zabbix_agent2_tlskeyfile %} +TLSKeyFile={{ zabbix_agent2_tlskeyfile }} +{% endif %} +{% if zabbix_agent2_tlspskidentity is defined and zabbix_agent2_tlspskidentity %} +TLSPSKIdentity={{ zabbix_agent2_tlspskidentity }} +{% endif %} +{% if zabbix_agent2_tlspskfile is defined and zabbix_agent2_tlspskfile %} +TLSPSKFile={{ zabbix_agent2_tlspskfile }} +{% endif %} +{% if zabbix_agent2_plugins is defined and zabbix_agent2_plugins is iterable %} +{% for entry in zabbix_agent2_plugins %} +{% set my_name = entry['name'] %} +{% for property in 
entry['options'] %} +{% set param = property['parameter'] %} +{% set value = property['value'] %} +Plugins.{{ my_name }}.{{ param }}={{ value }} +{% endfor %} +{% endfor %} +{% endif %} +{% if zabbix_agent_version is version('6.0', '>=') %} +{% if zabbix_agent2_listenbacklog is defined and zabbix_agent2_listenbacklog %} +ListenBacklog={{ zabbix_agent2_listenbacklog }} +{% endif %} +{% endif %} diff --git a/ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 b/ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 new file mode 100644 index 0000000..85c8c84 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/templates/zabbix_agentd.conf.j2 @@ -0,0 +1,149 @@ +{{ ansible_managed | comment }} +# This is a configuration file for Zabbix Agent +# To get more information about Zabbix, visit http://www.zabbix.com + +# This configuration file is "minimalized", which means all the original comments +# are removed. The full documentation for your Zabbix Agent can be found here: +# https://www.zabbix.com/documentation/{{ zabbix_agent_version }}/en/manual/appendix/config/zabbix_agentd{{ "_win" if ansible_os_family == "Windows" else "" }} + +{% if ansible_os_family != "Windows" %} +PidFile={{ zabbix_agent_pidfile }} +{% endif %} +{% if zabbix_agent_version is version('3.0', '>=') %} +LogType={{ zabbix_agent_logtype }} +{% endif %} +{% if ansible_os_family == "Windows" %} +LogFile={{ zabbix_agent_win_logfile }} +{% else %} +LogFile={{ zabbix_agent_logfile }} +{% endif %} +LogFileSize={{ zabbix_agent_logfilesize }} +DebugLevel={{ zabbix_agent_debuglevel }} +{% if zabbix_agent_sourceip is defined and zabbix_agent_sourceip %} +SourceIP={{ zabbix_agent_sourceip }} +{% endif %} +{% if zabbix_agent_version is version('6.0', '<=') %} +EnableRemoteCommands={{ zabbix_agent_enableremotecommands }} +{% else %} +{% if zabbix_agent_allowkeys is defined and zabbix_agent_allowkeys %} +AllowKey={{ zabbix_agent_allowkeys }} +{% endif %} +{% if 
zabbix_agent_denykeys is defined and zabbix_agent_denykeys %} +DenyKey={{ zabbix_agent_denykeys }} +{% endif %} +{% endif %} +LogRemoteCommands={{ zabbix_agent_logremotecommands }} +Server={{ zabbix_agent_server }} +ListenPort={{ zabbix_agent_listenport }} +{% if zabbix_agent_listenip is defined and zabbix_agent_listenip !='0.0.0.0' and zabbix_agent_listenip %} +ListenIP={{ zabbix_agent_listenip }} +{% endif %} +StartAgents={{ zabbix_agent_startagents }} +ServerActive={{ zabbix_agent_serveractive }} +{% if zabbix_agent_hostname is defined and zabbix_agent_hostname %} +Hostname={{ zabbix_agent_hostname }} +{% endif %} +{% if zabbix_agent_hostnameitem is defined and zabbix_agent_hostnameitem %} +HostnameItem={{ zabbix_agent_hostnameitem }} +{% endif %} +{% if zabbix_agent_hostmetadata is defined and zabbix_agent_hostmetadata %} +HostMetadata={{ zabbix_agent_hostmetadata }} +{% endif %} +{% if zabbix_agent_hostmetadataitem is defined and zabbix_agent_hostmetadataitem %} +HostMetadataItem={{ zabbix_agent_hostmetadataitem }} +{% endif %} +{% if zabbix_agent_allow_key is defined and zabbix_agent_allow_key %} +{% for item in zabbix_agent_allow_key %} +AllowKey={{ item }} +{% endfor %} +{% endif %} +{% if zabbix_agent_deny_key is defined and zabbix_agent_deny_key %} +{% for item in zabbix_agent_deny_key %} +DenyKey={{ item }} +{% endfor %} +{% endif %} +RefreshActiveChecks={{ zabbix_agent_refreshactivechecks }} +BufferSend={{ zabbix_agent_buffersend }} +BufferSize={{ zabbix_agent_buffersize }} +MaxLinesPerSecond={{ zabbix_agent_maxlinespersecond }} +{% if zabbix_agent_version is version_compare('6.2', '>=') %} +HeartbeatFrequency={{ zabbix_agent_heartbeatfrequency }} +{% endif %} +{% if zabbix_agent_zabbix_alias is defined and zabbix_agent_zabbix_alias %} +{% if zabbix_agent_zabbix_alias is string %} +Alias={{ zabbix_agent_zabbix_alias }} +{% else %} +{% for item in zabbix_agent_zabbix_alias %} +Alias={{ item }} +{% endfor %} +{% endif %} +{% endif %} +Timeout={{ 
zabbix_agent_timeout }} +{% if ansible_os_family != "Windows" %} +AllowRoot={{ zabbix_agent_allowroot }} +{% endif %} +{% if zabbix_agent_runas_user is defined and zabbix_agent_runas_user %} +User={{ zabbix_agent_runas_user }} +{% endif %} +{% if ansible_os_family == "Windows" %} +Include={{ zabbix_agent_win_include }} +{% else %} +Include={{ zabbix_agent_include }}/{{ zabbix_agent_include_pattern }} +{% endif %} +{% if zabbix_agent_additional_include is defined and zabbix_agent_additional_include is iterable and zabbix_agent_additional_include is not string %} +{% for include in zabbix_agent_additional_include %} +Include={{ include }} +{% endfor %} +{% endif %} +UnsafeUserParameters={{ zabbix_agent_unsafeuserparameters }} +{% if zabbix_agent_version is version_compare('2.2', '>=') %} +{% if ansible_os_family != "Windows" %} +LoadModulePath={{ zabbix_agent_loadmodulepath }} +{% endif %} +{% endif %} +{% if zabbix_agent_loadmodule is defined and zabbix_agent_loadmodule %} +{% if zabbix_agent_loadmodule is string %} +LoadModule={{ zabbix_agent_loadmodule }} +{% else %} +{% for module in zabbix_agent_loadmodule %} +LoadModule={{ module }} +{% endfor %} +{% endif %} +{% endif %} +{% if zabbix_agent_version is version_compare('3.0', '>=') %} +{% if zabbix_agent_tlsconnect is defined and zabbix_agent_tlsconnect %} +TLSConnect={{ zabbix_agent_tlsconnect }} +{% endif %} +{% if zabbix_agent_tlsaccept is defined and zabbix_agent_tlsaccept %} +TLSAccept={{ zabbix_agent_tlsaccept }} +{% endif %} +{% if zabbix_agent_tlscafile is defined and zabbix_agent_tlscafile %} +TLSCAFile={{ zabbix_agent_tlscafile }} +{% endif %} +{% if zabbix_agent_tlscrlfile is defined and zabbix_agent_tlscrlfile %} +TLSCRLFile={{ zabbix_agent_tlscrlfile }} +{% endif %} +{% if zabbix_agent_tlsservercertissuer is defined and zabbix_agent_tlsservercertissuer %} +TLSServerCertIssuer={{ zabbix_agent_tlsservercertissuer }} +{% endif %} +{% if zabbix_agent_tlsservercertsubject is defined and 
zabbix_agent_tlsservercertsubject %} +TLSServerCertSubject={{ zabbix_agent_tlsservercertsubject }} +{% endif %} +{% if zabbix_agent_tlscertfile is defined and zabbix_agent_tlscertfile %} +TLSCertFile={{ zabbix_agent_tlscertfile }} +{% endif %} +{% if zabbix_agent_tlskeyfile is defined and zabbix_agent_tlskeyfile %} +TLSKeyFile={{ zabbix_agent_tlskeyfile }} +{% endif %} +{% if zabbix_agent_tlspskidentity is defined and zabbix_agent_tlspskidentity %} +TLSPSKIdentity={{ zabbix_agent_tlspskidentity }} +{% endif %} +{% if zabbix_agent_tlspskfile is defined and zabbix_agent_tlspskfile %} +TLSPSKFile={{ zabbix_agent_tlspskfile }} +{% endif %} +{% endif %} +{% if zabbix_agent_version is version('6.0', '>=') %} +{% if zabbix_agent_listenbacklog is defined and zabbix_agent_listenbacklog %} +ListenBacklog={{ zabbix_agent_listenbacklog }} +{% endif %} +{% endif %} diff --git a/ansible/zabbix_agent/roles/zabbix-agent/vars/Debian.yml b/ansible/zabbix_agent/roles/zabbix-agent/vars/Debian.yml new file mode 100644 index 0000000..7c46c31 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/vars/Debian.yml @@ -0,0 +1,48 @@ +--- +# vars file for zabbix_agent (Debian) + +zabbix_agent: zabbix-agent +zabbix_agent_service: zabbix-agent +zabbix_agent_conf: zabbix_agentd.conf +zabbix_agent2_conf: zabbix_agent2.conf + +zabbix_valid_agent_versions: + # Debian + "12": + - 6.4 + - 6.2 + - 6.0 + + "11": + - 6.4 + - 6.2 + - 6.0 + + "10": + - 6.4 + - 6.2 + - 6.0 + + "9": + - 6.4 + - 6.2 + - 6.0 + # Ubuntu + "22": + - 6.4 + - 6.2 + - 6.0 + + "20": + - 6.4 + - 6.2 + - 6.0 + + "18": + - 6.4 + - 6.2 + - 6.0 + +debian_keyring_path: /etc/apt/keyrings/ +zabbix_gpg_key: "{{ debian_keyring_path }}/zabbix-official-repo.asc" +_zabbix_repo_deb_url: "http://repo.zabbix.com/zabbix/{{ zabbix_agent_version }}/{{ ansible_distribution.lower() }}" diff --git a/ansible/zabbix_agent/roles/zabbix-agent/vars/RedHat.yml b/ansible/zabbix_agent/roles/zabbix-agent/vars/RedHat.yml new file mode 100644 index 
0000000..2302e0f --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/vars/RedHat.yml @@ -0,0 +1,21 @@ +--- +# vars file for zabbix_agent (RedHat) + +zabbix_agent: zabbix-agent +zabbix_agent_service: zabbix-agent +zabbix_agent_conf: zabbix_agentd.conf +zabbix_agent2_conf: zabbix_agent2.conf + +zabbix_valid_agent_versions: + "9": + - 6.4 + - 6.2 + - 6.0 + "8": + - 6.4 + - 6.2 + - 6.0 + "7": + - 6.4 + - 6.2 + - 6.0 diff --git a/ansible/zabbix_agent/roles/zabbix-agent/vars/Windows.yml b/ansible/zabbix_agent/roles/zabbix-agent/vars/Windows.yml new file mode 100644 index 0000000..e4a7216 --- /dev/null +++ b/ansible/zabbix_agent/roles/zabbix-agent/vars/Windows.yml @@ -0,0 +1,7 @@ +--- +# vars file for zabbix_agent (Windows) +zabbix_valid_agent_versions: + "10": + - 6.4 + - 6.2 + - 6.0 diff --git a/ansible/zabbix_agent/roles/zabbix-agent/vars/main.yml b/ansible/zabbix_agent/roles/zabbix-agent/vars/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/zabbix_agent/zabbix-agent.yaml b/ansible/zabbix_agent/zabbix-agent.yaml new file mode 100644 index 0000000..9ce7065 --- /dev/null +++ b/ansible/zabbix_agent/zabbix-agent.yaml @@ -0,0 +1,28 @@ +--- +- hosts: all + roles: + - role: zabbix-agent + zabbix_api_server_host: 10.10.43.252 + zabbix_api_server_port: 80 + ansible_zabbix_url_path: "/" + zabbix_api_login_user: sa8001 + zabbix_api_login_pass: ios2011a + zabbix_api_create_hostgroup: false + zabbix_api_create_hosts: true + zabbix_agent_host_state: present + zabbix_host_groups: + - Linux servers + - Virtual machines + zabbix_agent_visible_hostname: "{{ ansible_fqdn }}" + zabbix_agent_server: 10.10.43.252 + zabbix_agent_serveractive: 10.10.43.252 + zabbix_agent_link_templates: + - Linux by Zabbix agent + zabbix_agent_version: 6.4 + zabbix_agent_unsafeuserparameters: 1 + zabbix_agent_userparameters: + - name: zombie + - name: zombielist + - name: dev2_iac_pass_failed + - name: dev2_pass_failed + - name: root_pass_failed